diff -Nru bcfg2-1.3.5/COPYRIGHT bcfg2-1.4.0~pre2+git141-g6d40dace6358/COPYRIGHT
--- bcfg2-1.3.5/COPYRIGHT 2014-09-05 12:54:48.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/COPYRIGHT 2017-01-10 19:18:17.000000000 +0000
@@ -157,7 +157,8 @@
 
 - Zach Lowry wrote Solaris support and general hardening.
 
-- Michael Fenn fixed various small bugs
+- Michael Fenn implemented the database router
+  for separately storing the reporting database and fixed various small bugs
   related to bcfg2 on CentOS 5
 
 - Alexander Sulfrian fixed various bugs.
diff -Nru bcfg2-1.3.5/debian/bcfg2.install bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/bcfg2.install
--- bcfg2-1.3.5/debian/bcfg2.install 2015-10-24 22:13:31.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/bcfg2.install 2017-01-17 21:50:39.000000000 +0000
@@ -1,6 +1,7 @@
 debian/tmp/usr/bin/bcfg2 usr/sbin
 debian/tmp/usr/lib/python*/*-packages/Bcfg2/*.py
 debian/tmp/usr/lib/python*/*-packages/Bcfg2/Client/*
+debian/tmp/usr/lib/python*/*-packages/Bcfg2/Options/*
 debian/tmp/usr/share/man/man1/*
 debian/tmp/usr/share/man/man5/*
 examples/bcfg2.conf usr/share/bcfg2
diff -Nru bcfg2-1.3.5/debian/bcfg2-server.install bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/bcfg2-server.install
--- bcfg2-1.3.5/debian/bcfg2-server.install 2015-10-24 22:13:31.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/bcfg2-server.install 2017-01-17 21:50:39.000000000 +0000
@@ -2,7 +2,6 @@
 debian/tmp/usr/bin/bcfg2-* usr/sbin
 debian/tmp/usr/lib/python*/*-packages/Bcfg2/Server/*
 debian/tmp/usr/lib/python*/*-packages/Bcfg2/Reporting/*
-debian/tmp/usr/share/bcfg2/Hostbase/*
 debian/tmp/usr/share/bcfg2/schemas/*
 debian/tmp/usr/share/bcfg2/xsl-transforms/*
 debian/tmp/usr/share/man/man8/*
diff -Nru bcfg2-1.3.5/debian/bcfg2-server.logcheck.ignore.server bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/bcfg2-server.logcheck.ignore.server
--- bcfg2-1.3.5/debian/bcfg2-server.logcheck.ignore.server 2015-10-24 22:13:31.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/bcfg2-server.logcheck.ignore.server 2017-01-17 21:50:39.000000000 +0000
@@ -1,4 +1,4 @@
-^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Processed [0-9]+ (fam|gamin) events in [0-9.]+ seconds\. [0-9]+ coalesced$
+^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Handled [0-9]+ events in [0-9.]+
 ^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Generated config for [._[:alnum:]-]+ in [0-9.]+ s$
 ^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Client [._[:alnum:]-]+ reported state (clean|dirty)$
 ^\w{3} [ :0-9]{11} [._[:alnum:]-]+ bcfg2-server\[[0-9]+\]: Suppressing event for bogus file .*$
diff -Nru bcfg2-1.3.5/debian/changelog bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/changelog
--- bcfg2-1.3.5/debian/changelog 2016-01-01 17:49:54.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/changelog 2017-08-10 23:03:16.000000000 +0000
@@ -1,3 +1,52 @@
+bcfg2 (1.4.0~pre2+git141-g6d40dace6358-1ubuntu1) artful; urgency=low
+
+  * Merge from Debian unstable. Remaining changes:
+    - debian/control:
+      + Recommends: Move graphviz to Suggests (avoid libX11 on a server).
+      + Suggests: Move cheetah to Recommends (for templating).
+  * Dropped changes, superseded in Debian:
+    - genshi now a Depends, neither a Recommends nor Suggests
+  * Dropped changes, included upstream:
+    - debian/patches/1000-nagiosgen-ipv6.patch:
+      + Fix NagiosGen with IPv6 hosts.
+    - debian/patches/1001-nagiosgen-sort-groups.patch:
+      + NagiosGen: Sort the hostgroup list.
+
+ -- Steve Langasek Thu, 10 Aug 2017 16:03:16 -0700
+
+bcfg2 (1.4.0~pre2+git141-g6d40dace6358-1) unstable; urgency=medium
+
+  * New upstream git snapshot for more Django 1.9 and 1.10 fixes
+
+ -- Arto Jantunen Tue, 17 Jan 2017 19:33:42 +0200
+
+bcfg2 (1.4.0~pre2+git128-g9797b170668a-1) unstable; urgency=medium
+
+  * New upstream git snapshot containing fixes to run on Django 1.9
+  * South isn't required when using Django 1.7 or later (Closes: #807998)
+  * Use https url's in Vcs fields
+  * Bump Standards-Version to 3.9.8.0, no changes
+
+ -- Arto Jantunen Sun, 25 Dec 2016 11:38:11 +0200
+
+bcfg2 (1.4.0~pre1+git18-gea63477-1) experimental; urgency=medium
+
+  * New upstream git snapshot containing bug fixes for config option
+    handling, logging, etc.
+
+ -- Arto Jantunen Sat, 02 Aug 2014 14:09:53 +0300
+
+bcfg2 (1.4.0~pre1-1) experimental; urgency=low
+
+  * New upstream version 1.4.0~pre1
+  * Note the removal of the Hostbase plugin in bcfg2-server.install
+  * Sqlalchemy is no longer used, remove the dependency
+  * Include the new Options module in the bcfg2 package
+  * Move python-genshi from Suggest to Depends, it's required since
+    upstream commit cfa769a
+
+ -- Arto Jantunen Tue, 15 Jul 2014 10:35:57 +0300
+
 bcfg2 (1.3.5-4ubuntu1) xenial; urgency=low
 
   * Merge from Debian unstable. Remaining changes:
diff -Nru bcfg2-1.3.5/debian/control bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/control
--- bcfg2-1.3.5/debian/control 2015-10-24 22:13:31.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/control 2017-08-10 23:03:16.000000000 +0000
@@ -19,11 +19,11 @@
  python-mock-doc,
  dh-apache2,
  dh-systemd
-Standards-Version: 3.9.5.0
+Standards-Version: 3.9.8.0
 Homepage: http://bcfg2.org/
 X-Python-Version: >= 2.6
-Vcs-Browser: http://anonscm.debian.org/gitweb/?p=collab-maint/bcfg2.git
-Vcs-Git: git://anonscm.debian.org/collab-maint/bcfg2.git
+Vcs-Browser: https://anonscm.debian.org/gitweb/?p=collab-maint/bcfg2.git
+Vcs-Git: https://anonscm.debian.org/git/collab-maint/bcfg2.git
 
 Package: bcfg2
 Architecture: all
@@ -36,9 +36,9 @@
 
 Package: bcfg2-server
 Architecture: all
-Depends: ${python:Depends}, ${misc:Depends}, python-lxml (>= 0.9), libxml2-utils (>= 2.6.23), lsb-base (>= 3.1-9), ucf, bcfg2 (= ${binary:Version}), openssl, python-pyinotify | python-gamin, python-daemon
-Recommends: patch, python-cheetah, python-genshi (>= 0.4.4)
-Suggests: python-profiler, python-sqlalchemy (>= 0.5.0), python-django, mail-transport-agent, bcfg2-doc (= ${binary:Version}), graphviz
+Depends: ${python:Depends}, ${misc:Depends}, python-lxml (>= 0.9), libxml2-utils (>= 2.6.23), lsb-base (>= 3.1-9), ucf, bcfg2 (= ${binary:Version}), openssl, python-pyinotify | python-gamin, python-daemon, python-genshi (>= 0.4.4)
+Recommends: patch, python-cheetah
+Suggests: python-profiler, python-django, mail-transport-agent, bcfg2-doc (= ${binary:Version}), graphviz
 Description: Configuration management server
  Bcfg2 is a configuration management system that generates configuration
  sets for clients bound by client profiles.
@@ -47,7 +47,7 @@
 
 Package: bcfg2-web
 Architecture: all
-Depends: ${python:Depends}, ${misc:Depends}, bcfg2-server (= ${binary:Version}), python-django, python-django-south (>= 0.7.5)
+Depends: ${python:Depends}, ${misc:Depends}, bcfg2-server (= ${binary:Version}), python-django (>= 1.7-1)
 Recommends: libapache2-mod-wsgi, ${misc:Recommends}
 Suggests: python-mysqldb, python-psycopg2, python-sqlite
 Description: Configuration management web interface
diff -Nru bcfg2-1.3.5/debian/patches/0003-make-Bcfg2-compatible-with-django-1.7.patch bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/0003-make-Bcfg2-compatible-with-django-1.7.patch
--- bcfg2-1.3.5/debian/patches/0003-make-Bcfg2-compatible-with-django-1.7.patch 2015-10-24 22:13:31.000000000 +0000
+++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/0003-make-Bcfg2-compatible-with-django-1.7.patch 1970-01-01 00:00:00.000000000 +0000
@@ -1,4456 +0,0 @@
-From: Jonas Jochmaring
-Date: Fri, 22 May 2015 15:29:02 +0200
-Subject: make Bcfg2 compatible with django 1.7
- - reports.wsgi uses get_wsgi_application() now
- - old south-based migrations have been moved
- - manage.py has been updated
- - fix SOUTH_MIGRATION_MODULES value in settings.py
- - fix django loading in bcfg2 server core
- - fix django loading DjangoORM storage
- - revert back to old django testrunner
- - call django.setup in bcfg2-reports
- - close the database connection after syncdb in BaseCore.__init__
- - fixed reports.wsgi script
- - install south_migrations
----
- reports/reports.wsgi | 19 +-
- setup.py | 1 +
- src/lib/Bcfg2/Reporting/Storage/DjangoORM.py | 3 +
- src/lib/Bcfg2/Reporting/migrations/0001_initial.py | 1006 +++++++++++---------
- .../migrations/0002_convert_perms_to_mode.py | 171 ----
- .../Reporting/migrations/0003_expand_hash_key.py | 180 ----
- .../migrations/0004_profile_can_be_null.py | 156 ---
- .../migrations/0005_add_selinux_entry_support.py | 485 ----------
- .../0006_add_user_group_entry_support.py | 340 -------
- .../Reporting/south_migrations/0001_initial.py | 465 +++++++++
- .../south_migrations/0002_convert_perms_to_mode.py | 171 ++++
- .../south_migrations/0003_expand_hash_key.py | 180 ++++
- .../south_migrations/0004_profile_can_be_null.py | 156 +++
- .../0005_add_selinux_entry_support.py | 485 ++++++++++
- .../0006_add_user_group_entry_support.py | 340 +++++++
- .../Bcfg2/Reporting/south_migrations/__init__.py | 0
- src/lib/Bcfg2/Server/Admin/Reports.py | 14 +-
- src/lib/Bcfg2/Server/Core.py | 4 +
- src/lib/Bcfg2/manage.py | 27 +-
- src/lib/Bcfg2/settings.py | 11 +-
- src/sbin/bcfg2-reports | 3 +
- 21 files changed, 2400 insertions(+), 1817 deletions(-)
- delete mode 100644 src/lib/Bcfg2/Reporting/migrations/0002_convert_perms_to_mode.py
- delete mode 100644 src/lib/Bcfg2/Reporting/migrations/0003_expand_hash_key.py
- delete mode 100644 src/lib/Bcfg2/Reporting/migrations/0004_profile_can_be_null.py
- delete mode 100644 src/lib/Bcfg2/Reporting/migrations/0005_add_selinux_entry_support.py
- delete mode 100644 src/lib/Bcfg2/Reporting/migrations/0006_add_user_group_entry_support.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/0001_initial.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/0002_convert_perms_to_mode.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/0003_expand_hash_key.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/0004_profile_can_be_null.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/0005_add_selinux_entry_support.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/0006_add_user_group_entry_support.py
- create mode 100644 src/lib/Bcfg2/Reporting/south_migrations/__init__.py
-
-diff --git a/reports/reports.wsgi b/reports/reports.wsgi
-index 92401d7..75f1a73 100644
---- a/reports/reports.wsgi
-+++ b/reports/reports.wsgi
-@@ -1,9 +1,18 @@
- import os
- import Bcfg2.settings
- os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings'
--import django.core.handlers.wsgi
-+import django
- 
--def application(environ, start_response):
--    if 'BCFG2_CONFIG_FILE' in environ:
--        Bcfg2.settings.read_config(cfile=environ['BCFG2_CONFIG_FILE'])
--    return django.core.handlers.wsgi.WSGIHandler()(environ, start_response)
-+if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
-+    from django.core.wsgi import get_wsgi_application
-+    def application(environ, start_response):
-+        if 'BCFG2_CONFIG_FILE' in environ:
-+            Bcfg2.settings.read_config(cfile=environ['BCFG2_CONFIG_FILE'])
-+        return get_wsgi_application()(environ, start_response)
-+
-+else:
-+    import django.core.handlers.wsgi
-+    def application(environ, start_response):
-+        if 'BCFG2_CONFIG_FILE' in environ:
-+            Bcfg2.settings.read_config(cfile=environ['BCFG2_CONFIG_FILE'])
-+        return django.core.handlers.wsgi.WSGIHandler()(environ, start_response)
-diff --git a/setup.py b/setup.py
-index 59b1d65..6c9d1a1 100755
---- a/setup.py
-+++ b/setup.py
-@@ -37,6 +37,7 @@ setup(name="Bcfg2",
-       "Bcfg2.Reporting.Storage",
-       "Bcfg2.Reporting.Transport",
-       "Bcfg2.Reporting.migrations",
-+      "Bcfg2.Reporting.south_migrations",
-       "Bcfg2.Reporting.templatetags",
-       'Bcfg2.Server',
-       "Bcfg2.Server.Admin",
-diff --git a/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py b/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py
-index 0bb3111..8eadb87 100644
---- a/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py
-+++ b/src/lib/Bcfg2/Reporting/Storage/DjangoORM.py
-@@ -19,6 +19,9 @@ from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
- from django.db.models import FieldDoesNotExist
- from django.core.cache import cache
- from django import db
-+import django
-+if django.VERSION[0] == 1 and django.VERSION[1] >= 7:
-+    django.setup()
- 
- #Used by GetCurrentEntry
- import difflib
-diff --git a/src/lib/Bcfg2/Reporting/migrations/0001_initial.py b/src/lib/Bcfg2/Reporting/migrations/0001_initial.py
-index 609290e..aab0df6 100644
---- a/src/lib/Bcfg2/Reporting/migrations/0001_initial.py
-+++ b/src/lib/Bcfg2/Reporting/migrations/0001_initial.py
-@@ -1,465 +1,543 @@
- # -*- coding: utf-8 -*-
--import datetime
--from south.db import db
--from south.v2 import SchemaMigration
--from django.db import models
--
--
--class Migration(SchemaMigration):
--
--    def forwards(self, orm):
--        # Adding model 'Client'
--        db.create_table('Reporting_client', (
--            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
--            ('creation', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
--            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
--            ('current_interaction', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent_client', null=True, to=orm['Reporting.Interaction'])),
--            ('expiration', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
--        ))
--        db.send_create_signal('Reporting', ['Client'])
--
--        # Adding model 'Interaction'
--        db.create_table('Reporting_interaction', (
--            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
--            ('client',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='interactions', to=orm['Reporting.Client'])), -- ('timestamp', self.gf('django.db.models.fields.DateTimeField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.CharField')(max_length=32)), -- ('repo_rev_code', self.gf('django.db.models.fields.CharField')(max_length=64)), -- ('server', self.gf('django.db.models.fields.CharField')(max_length=256)), -- ('good_count', self.gf('django.db.models.fields.IntegerField')()), -- ('total_count', self.gf('django.db.models.fields.IntegerField')()), -- ('bad_count', self.gf('django.db.models.fields.IntegerField')(default=0)), -- ('modified_count', self.gf('django.db.models.fields.IntegerField')(default=0)), -- ('extra_count', self.gf('django.db.models.fields.IntegerField')(default=0)), -- ('profile', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.Group'])), -- )) -- db.send_create_signal('Reporting', ['Interaction']) -- -- # Adding unique constraint on 'Interaction', fields ['client', 'timestamp'] -- db.create_unique('Reporting_interaction', ['client_id', 'timestamp']) -- -- # Adding M2M table for field actions on 'Interaction' -- db.create_table('Reporting_interaction_actions', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('actionentry', models.ForeignKey(orm['Reporting.actionentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_actions', ['interaction_id', 'actionentry_id']) -- -- # Adding M2M table for field packages on 'Interaction' -- db.create_table('Reporting_interaction_packages', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('packageentry', models.ForeignKey(orm['Reporting.packageentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_packages', ['interaction_id', 'packageentry_id']) -- -- # Adding M2M table for field paths on 'Interaction' -- db.create_table('Reporting_interaction_paths', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('pathentry', models.ForeignKey(orm['Reporting.pathentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_paths', ['interaction_id', 'pathentry_id']) -- -- # Adding M2M table for field services on 'Interaction' -- db.create_table('Reporting_interaction_services', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('serviceentry', models.ForeignKey(orm['Reporting.serviceentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_services', ['interaction_id', 'serviceentry_id']) -- -- # Adding M2M table for field failures on 'Interaction' -- db.create_table('Reporting_interaction_failures', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('failureentry', models.ForeignKey(orm['Reporting.failureentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_failures', ['interaction_id', 'failureentry_id']) -- -- # Adding M2M table for field groups on 'Interaction' -- db.create_table('Reporting_interaction_groups', ( -- 
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('group', models.ForeignKey(orm['Reporting.group'], null=False)) -- )) -- db.create_unique('Reporting_interaction_groups', ['interaction_id', 'group_id']) -- -- # Adding M2M table for field bundles on 'Interaction' -- db.create_table('Reporting_interaction_bundles', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('bundle', models.ForeignKey(orm['Reporting.bundle'], null=False)) -- )) -- db.create_unique('Reporting_interaction_bundles', ['interaction_id', 'bundle_id']) -- -- # Adding model 'Performance' -- db.create_table('Reporting_performance', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('interaction', self.gf('django.db.models.fields.related.ForeignKey')(related_name='performance_items', to=orm['Reporting.Interaction'])), -- ('metric', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('value', self.gf('django.db.models.fields.DecimalField')(max_digits=32, decimal_places=16)), -- )) -- db.send_create_signal('Reporting', ['Performance']) -- -- # Adding model 'Group' -- db.create_table('Reporting_group', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), -- ('profile', self.gf('django.db.models.fields.BooleanField')(default=False)), -- ('public', self.gf('django.db.models.fields.BooleanField')(default=False)), -- ('category', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), -- ('comment', self.gf('django.db.models.fields.TextField')(blank=True)), -- )) -- db.send_create_signal('Reporting', ['Group']) -- -- # Adding M2M table for field groups on 'Group' -- db.create_table('Reporting_group_groups', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('from_group', models.ForeignKey(orm['Reporting.group'], null=False)), -- ('to_group', models.ForeignKey(orm['Reporting.group'], null=False)) -- )) -- db.create_unique('Reporting_group_groups', ['from_group_id', 'to_group_id']) -- -- # Adding M2M table for field bundles on 'Group' -- db.create_table('Reporting_group_bundles', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('group', models.ForeignKey(orm['Reporting.group'], null=False)), -- ('bundle', models.ForeignKey(orm['Reporting.bundle'], null=False)) -- )) -- db.create_unique('Reporting_group_bundles', ['group_id', 'bundle_id']) -- -- # Adding model 'Bundle' -- db.create_table('Reporting_bundle', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), -- )) -- db.send_create_signal('Reporting', ['Bundle']) -- -- # Adding model 'FilePerms' -- db.create_table('Reporting_fileperms', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('owner', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('group', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('perms', self.gf('django.db.models.fields.CharField')(max_length=128)), -- )) -- db.send_create_signal('Reporting', ['FilePerms']) -- -- # Adding unique constraint on 'FilePerms', fields ['owner', 'group', 'perms'] -- 
db.create_unique('Reporting_fileperms', ['owner', 'group', 'perms']) -- -- # Adding model 'FileAcl' -- db.create_table('Reporting_fileacl', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- )) -- db.send_create_signal('Reporting', ['FileAcl']) -- -- # Adding model 'FailureEntry' -- db.create_table('Reporting_failureentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -- ('entry_type', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('message', self.gf('django.db.models.fields.TextField')()), -- )) -- db.send_create_signal('Reporting', ['FailureEntry']) -- -- # Adding model 'ActionEntry' -- db.create_table('Reporting_actionentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('status', self.gf('django.db.models.fields.CharField')(default='check', max_length=128)), -- ('output', self.gf('django.db.models.fields.IntegerField')(default=0)), -- )) -- db.send_create_signal('Reporting', ['ActionEntry']) -- -- # Adding model 'PackageEntry' -- db.create_table('Reporting_packageentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('target_version', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)), -- ('current_version', self.gf('django.db.models.fields.CharField')(max_length=1024)), -- ('verification_details', self.gf('django.db.models.fields.TextField')(default='')), -- )) -- db.send_create_signal('Reporting', ['PackageEntry']) -- -- # Adding model 'PathEntry' -- db.create_table('Reporting_pathentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('path_type', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('target_perms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.FilePerms'])), -- ('current_perms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.FilePerms'])), -- ('detail_type', self.gf('django.db.models.fields.IntegerField')(default=0)), -- ('details', self.gf('django.db.models.fields.TextField')(default='')), -- )) -- db.send_create_signal('Reporting', ['PathEntry']) -- -- # Adding M2M table for field acls on 'PathEntry' -- db.create_table('Reporting_pathentry_acls', ( -- ('id', 
models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('pathentry', models.ForeignKey(orm['Reporting.pathentry'], null=False)), -- ('fileacl', models.ForeignKey(orm['Reporting.fileacl'], null=False)) -- )) -- db.create_unique('Reporting_pathentry_acls', ['pathentry_id', 'fileacl_id']) -- -- # Adding model 'LinkEntry' -- db.create_table('Reporting_linkentry', ( -- ('pathentry_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['Reporting.PathEntry'], unique=True, primary_key=True)), -- ('target_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), -- ('current_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), -- )) -- db.send_create_signal('Reporting', ['LinkEntry']) -- -- # Adding model 'DeviceEntry' -- db.create_table('Reporting_deviceentry', ( -- ('pathentry_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['Reporting.PathEntry'], unique=True, primary_key=True)), -- ('device_type', self.gf('django.db.models.fields.CharField')(max_length=16)), -- ('target_major', self.gf('django.db.models.fields.IntegerField')()), -- ('target_minor', self.gf('django.db.models.fields.IntegerField')()), -- ('current_major', self.gf('django.db.models.fields.IntegerField')()), -- ('current_minor', self.gf('django.db.models.fields.IntegerField')()), -- )) -- db.send_create_signal('Reporting', ['DeviceEntry']) -- -- # Adding model 'ServiceEntry' -- db.create_table('Reporting_serviceentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('target_status', self.gf('django.db.models.fields.CharField')(default='', max_length=128)), -- ('current_status', self.gf('django.db.models.fields.CharField')(default='', max_length=128)), -- )) -- db.send_create_signal('Reporting', ['ServiceEntry']) -- -- -- def backwards(self, orm): -- # Removing unique constraint on 'FilePerms', fields ['owner', 'group', 'perms'] -- db.delete_unique('Reporting_fileperms', ['owner', 'group', 'perms']) -- -- # Removing unique constraint on 'Interaction', fields ['client', 'timestamp'] -- db.delete_unique('Reporting_interaction', ['client_id', 'timestamp']) -- -- # Deleting model 'Client' -- db.delete_table('Reporting_client') -- -- # Deleting model 'Interaction' -- db.delete_table('Reporting_interaction') -- -- # Removing M2M table for field actions on 'Interaction' -- db.delete_table('Reporting_interaction_actions') -- -- # Removing M2M table for field packages on 'Interaction' -- db.delete_table('Reporting_interaction_packages') -- -- # Removing M2M table for field paths on 'Interaction' -- db.delete_table('Reporting_interaction_paths') -- -- # Removing M2M table for field services on 'Interaction' -- db.delete_table('Reporting_interaction_services') -- -- # Removing M2M table for field failures on 'Interaction' -- db.delete_table('Reporting_interaction_failures') -- -- # Removing M2M table for field groups on 'Interaction' -- db.delete_table('Reporting_interaction_groups') -- -- # Removing M2M table for field bundles on 'Interaction' -- db.delete_table('Reporting_interaction_bundles') -- -- # Deleting model 'Performance' -- db.delete_table('Reporting_performance') -- -- # 
Deleting model 'Group' -- db.delete_table('Reporting_group') -- -- # Removing M2M table for field groups on 'Group' -- db.delete_table('Reporting_group_groups') -- -- # Removing M2M table for field bundles on 'Group' -- db.delete_table('Reporting_group_bundles') -- -- # Deleting model 'Bundle' -- db.delete_table('Reporting_bundle') -- -- # Deleting model 'FilePerms' -- db.delete_table('Reporting_fileperms') -- -- # Deleting model 'FileAcl' -- db.delete_table('Reporting_fileacl') -- -- # Deleting model 'FailureEntry' -- db.delete_table('Reporting_failureentry') -- -- # Deleting model 'ActionEntry' -- db.delete_table('Reporting_actionentry') -- -- # Deleting model 'PackageEntry' -- db.delete_table('Reporting_packageentry') -- -- # Deleting model 'PathEntry' -- db.delete_table('Reporting_pathentry') -- -- # Removing M2M table for field acls on 'PathEntry' -- db.delete_table('Reporting_pathentry_acls') -- -- # Deleting model 'LinkEntry' -- db.delete_table('Reporting_linkentry') -- -- # Deleting model 'DeviceEntry' -- db.delete_table('Reporting_deviceentry') -- -- # Deleting model 'ServiceEntry' -- db.delete_table('Reporting_serviceentry') -- -- -- models = { -- 'Reporting.actionentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -- }, -- 'Reporting.bundle': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -- }, -- 'Reporting.client': { -- 'Meta': {'object_name': 'Client'}, -- 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -- 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -- 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.deviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_major': ('django.db.models.fields.IntegerField', [], {}), -- 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -- 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_major': ('django.db.models.fields.IntegerField', [], {}), -- 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.failureentry': { -- 'Meta': {'object_name': 'FailureEntry'}, -- 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': 
'128'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'message': ('django.db.models.fields.TextField', [], {}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileacl': { -- 'Meta': {'object_name': 'FileAcl'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileperms': { -- 'Meta': {'unique_together': "(('owner', 'group', 'perms'),)", 'object_name': 'FilePerms'}, -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'perms': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.group': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -- 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -- }, -- 'Reporting.interaction': { -- 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -- 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -- 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -- 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -- 'good_count': ('django.db.models.fields.IntegerField', [], {}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -- 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -- 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), -- 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': 
'64'}), -- 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -- 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -- 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -- 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -- 'total_count': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.linkentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -- }, -- 'Reporting.packageentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -- 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -- 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -- }, -- 'Reporting.pathentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -- 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -- 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -- 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -- }, -- 'Reporting.performance': { -- 'Meta': {'object_name': 'Performance'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -- 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -- }, -- 'Reporting.serviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -- 'current_status': ('django.db.models.fields.CharField', [], 
{'default': "''", 'max_length': '128'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -- } -- } -- -- complete_apps = ['Reporting'] -\ No newline at end of file -+from __future__ import unicode_literals -+ -+from django.db import models, migrations -+ -+ -+class Migration(migrations.Migration): -+ -+ dependencies = [ -+ ] -+ -+ operations = [ -+ migrations.CreateModel( -+ name='ActionEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('status', models.CharField(default=b'check', max_length=128)), -+ ('output', models.IntegerField(default=0)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='Bundle', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(unique=True, max_length=255)), -+ ], -+ options={ -+ 'ordering': ('name',), -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='Client', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('creation', models.DateTimeField(auto_now_add=True)), -+ ('name', models.CharField(max_length=128)), -+ ('expiration', models.DateTimeField(null=True, blank=True)), -+ ], -+ options={ -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='FailureEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('entry_type', models.CharField(max_length=128)), -+ ('message', models.TextField()), -+ ], -+ options={ -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='FileAcl', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ], -+ options={ -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='FilePerms', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('owner', models.CharField(max_length=128)), -+ ('group', models.CharField(max_length=128)), -+ ('mode', models.CharField(max_length=128)), -+ ], -+ options={ -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='Group', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(unique=True, max_length=255)), -+ ('profile', models.BooleanField(default=False)), -+ 
('public', models.BooleanField(default=False)), -+ ('category', models.CharField(max_length=1024, blank=True)), -+ ('comment', models.TextField(blank=True)), -+ ('bundles', models.ManyToManyField(to='Reporting.Bundle')), -+ ('groups', models.ManyToManyField(to='Reporting.Group')), -+ ], -+ options={ -+ 'ordering': ('name',), -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='Interaction', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('timestamp', models.DateTimeField(db_index=True)), -+ ('state', models.CharField(max_length=32)), -+ ('repo_rev_code', models.CharField(max_length=64)), -+ ('server', models.CharField(max_length=256)), -+ ('good_count', models.IntegerField()), -+ ('total_count', models.IntegerField()), -+ ('bad_count', models.IntegerField(default=0)), -+ ('modified_count', models.IntegerField(default=0)), -+ ('extra_count', models.IntegerField(default=0)), -+ ('actions', models.ManyToManyField(to='Reporting.ActionEntry')), -+ ('bundles', models.ManyToManyField(to='Reporting.Bundle')), -+ ('client', models.ForeignKey(related_name='interactions', to='Reporting.Client')), -+ ('failures', models.ManyToManyField(to='Reporting.FailureEntry')), -+ ('groups', models.ManyToManyField(to='Reporting.Group')), -+ ], -+ options={ -+ 'ordering': ['-timestamp'], -+ 'get_latest_by': 'timestamp', -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='PackageEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('target_version', models.CharField(default=b'', max_length=1024)), -+ ('current_version', models.CharField(max_length=1024)), -+ ('verification_details', models.TextField(default=b'')), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='PathEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('path_type', models.CharField(max_length=128, choices=[(b'device', b'Device'), (b'directory', b'Directory'), (b'hardlink', b'Hard Link'), (b'nonexistent', b'Non Existent'), (b'permissions', b'Permissions'), (b'symlink', b'Symlink')])), -+ ('detail_type', models.IntegerField(default=0, choices=[(0, b'Unused'), (1, b'Diff'), (2, b'Binary'), (3, b'Sensitive'), (4, b'Size limit exceeded'), (5, b'VCS output'), (6, b'Pruned paths')])), -+ ('details', models.TextField(default=b'')), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='LinkEntry', -+ fields=[ -+ ('pathentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='Reporting.PathEntry')), -+ ('target_path', models.CharField(max_length=1024, blank=True)), -+ ('current_path', 
models.CharField(max_length=1024, blank=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=('Reporting.pathentry',), -+ ), -+ migrations.CreateModel( -+ name='DeviceEntry', -+ fields=[ -+ ('pathentry_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='Reporting.PathEntry')), -+ ('device_type', models.CharField(max_length=16, choices=[(b'block', b'Block'), (b'char', b'Char'), (b'fifo', b'Fifo')])), -+ ('target_major', models.IntegerField()), -+ ('target_minor', models.IntegerField()), -+ ('current_major', models.IntegerField()), -+ ('current_minor', models.IntegerField()), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=('Reporting.pathentry',), -+ ), -+ migrations.CreateModel( -+ name='Performance', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('metric', models.CharField(max_length=128)), -+ ('value', models.DecimalField(max_digits=32, decimal_places=16)), -+ ('interaction', models.ForeignKey(related_name='performance_items', to='Reporting.Interaction')), -+ ], -+ options={ -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='POSIXGroupEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('gid', models.IntegerField(null=True)), -+ ('current_gid', models.IntegerField(null=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='POSIXUserEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('uid', models.IntegerField(null=True)), -+ ('current_uid', models.IntegerField(null=True)), -+ ('group', models.CharField(max_length=64)), -+ ('current_group', models.CharField(max_length=64, null=True)), -+ ('gecos', models.CharField(max_length=1024)), -+ ('current_gecos', models.CharField(max_length=1024, null=True)), -+ ('home', models.CharField(max_length=1024)), -+ ('current_home', models.CharField(max_length=1024, null=True)), -+ ('shell', models.CharField(default=b'/bin/bash', max_length=1024)), -+ ('current_shell', models.CharField(max_length=1024, null=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEBooleanEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('value', 
models.BooleanField(default=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEFcontextEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('selinuxtype', models.CharField(max_length=128)), -+ ('current_selinuxtype', models.CharField(max_length=128, null=True)), -+ ('filetype', models.CharField(max_length=16)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEInterfaceEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('selinuxtype', models.CharField(max_length=128)), -+ ('current_selinuxtype', models.CharField(max_length=128, null=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SELoginEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('selinuxuser', models.CharField(max_length=128)), -+ ('current_selinuxuser', models.CharField(max_length=128, null=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEModuleEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('disabled', models.BooleanField(default=False)), -+ ('current_disabled', models.BooleanField(default=False)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SENodeEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('selinuxtype', models.CharField(max_length=128)), -+ ('current_selinuxtype', models.CharField(max_length=128, null=True)), -+ 
('proto', models.CharField(max_length=4)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEPermissiveEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEPortEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('selinuxtype', models.CharField(max_length=128)), -+ ('current_selinuxtype', models.CharField(max_length=128, null=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='ServiceEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('target_status', models.CharField(default=b'', max_length=128)), -+ ('current_status', models.CharField(default=b'', max_length=128)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.CreateModel( -+ name='SEUserEntry', -+ fields=[ -+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), -+ ('name', models.CharField(max_length=128, db_index=True)), -+ ('hash_key', models.BigIntegerField(editable=False, db_index=True)), -+ ('state', models.IntegerField(choices=[(0, b'Good'), (1, b'Bad'), (2, b'Modified'), (3, b'Extra')])), -+ ('exists', models.BooleanField(default=True)), -+ ('roles', models.CharField(max_length=128)), -+ ('current_roles', models.CharField(max_length=128, null=True)), -+ ('prefix', models.CharField(max_length=128)), -+ ('current_prefix', models.CharField(max_length=128, null=True)), -+ ], -+ options={ -+ 'ordering': ('state', 'name'), -+ 'abstract': False, -+ }, -+ bases=(models.Model,), -+ ), -+ migrations.AddField( -+ model_name='pathentry', -+ name='acls', -+ field=models.ManyToManyField(to='Reporting.FileAcl'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='pathentry', -+ name='current_perms', -+ field=models.ForeignKey(related_name='+', to='Reporting.FilePerms'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='pathentry', -+ name='target_perms', -+ field=models.ForeignKey(related_name='+', to='Reporting.FilePerms'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='packages', -+ 
field=models.ManyToManyField(to='Reporting.PackageEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='paths', -+ field=models.ManyToManyField(to='Reporting.PathEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='posixgroups', -+ field=models.ManyToManyField(to='Reporting.POSIXGroupEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='posixusers', -+ field=models.ManyToManyField(to='Reporting.POSIXUserEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='profile', -+ field=models.ForeignKey(related_name='+', to='Reporting.Group', null=True), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='sebooleans', -+ field=models.ManyToManyField(to='Reporting.SEBooleanEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='sefcontexts', -+ field=models.ManyToManyField(to='Reporting.SEFcontextEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='seinterfaces', -+ field=models.ManyToManyField(to='Reporting.SEInterfaceEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='selogins', -+ field=models.ManyToManyField(to='Reporting.SELoginEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='semodules', -+ field=models.ManyToManyField(to='Reporting.SEModuleEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='senodes', -+ field=models.ManyToManyField(to='Reporting.SENodeEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='sepermissives', -+ field=models.ManyToManyField(to='Reporting.SEPermissiveEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='seports', -+ field=models.ManyToManyField(to='Reporting.SEPortEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='services', -+ field=models.ManyToManyField(to='Reporting.ServiceEntry'), -+ preserve_default=True, -+ ), -+ migrations.AddField( -+ model_name='interaction', -+ name='seusers', -+ field=models.ManyToManyField(to='Reporting.SEUserEntry'), -+ preserve_default=True, -+ ), -+ migrations.AlterUniqueTogether( -+ name='interaction', -+ unique_together=set([('client', 'timestamp')]), -+ ), -+ migrations.AlterUniqueTogether( -+ name='fileperms', -+ unique_together=set([('owner', 'group', 'mode')]), -+ ), -+ migrations.AddField( -+ model_name='client', -+ name='current_interaction', -+ field=models.ForeignKey(related_name='parent_client', blank=True, to='Reporting.Interaction', null=True), -+ preserve_default=True, -+ ), -+ ] -diff --git a/src/lib/Bcfg2/Reporting/migrations/0002_convert_perms_to_mode.py b/src/lib/Bcfg2/Reporting/migrations/0002_convert_perms_to_mode.py -deleted file mode 100644 -index 668094c..0000000 ---- a/src/lib/Bcfg2/Reporting/migrations/0002_convert_perms_to_mode.py -+++ /dev/null -@@ -1,171 +0,0 @@ --# -*- coding: utf-8 -*- --import datetime --from south.db import db --from south.v2 import SchemaMigration --from django.db import models -- --from Bcfg2 import settings -- --class Migration(SchemaMigration): -- -- def forwards(self, orm): -- # Removing unique constraint on 'FilePerms', fields ['owner', 'perms', 
'group'] -- db.delete_unique('Reporting_fileperms', ['owner', 'perms', 'group']) -- -- # Renaming field 'FilePerms.perms' to 'FilePerms.mode' -- db.rename_column('Reporting_fileperms', 'perms', 'mode') -- -- if not settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3': -- # Adding unique constraint on 'FilePerms', fields ['owner', 'group', 'mode'] -- db.create_unique('Reporting_fileperms', ['owner', 'group', 'mode']) -- -- -- def backwards(self, orm): -- # Removing unique constraint on 'FilePerms', fields ['owner', 'group', 'mode'] -- db.delete_unique('Reporting_fileperms', ['owner', 'group', 'mode']) -- -- # Renaming field 'FilePerms.mode' to 'FilePerms.perms' -- db.rename_column('Reporting_fileperms', 'mode', 'perms') -- -- if not settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3': -- # Adding unique constraint on 'FilePerms', fields ['owner', 'perms', 'group'] -- db.create_unique('Reporting_fileperms', ['owner', 'perms', 'group']) -- -- -- models = { -- 'Reporting.actionentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -- }, -- 'Reporting.bundle': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -- }, -- 'Reporting.client': { -- 'Meta': {'object_name': 'Client'}, -- 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -- 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -- 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.deviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_major': ('django.db.models.fields.IntegerField', [], {}), -- 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -- 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_major': ('django.db.models.fields.IntegerField', [], {}), -- 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.failureentry': { -- 'Meta': {'object_name': 'FailureEntry'}, -- 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
-- 'message': ('django.db.models.fields.TextField', [], {}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileacl': { -- 'Meta': {'object_name': 'FileAcl'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileperms': { -- 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.group': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -- 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -- }, -- 'Reporting.interaction': { -- 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -- 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -- 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -- 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -- 'good_count': ('django.db.models.fields.IntegerField', [], {}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -- 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -- 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), -- 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -- 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -- 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -- 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -- 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -- 'total_count': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.linkentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -- }, -- 'Reporting.packageentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -- 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -- 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -- }, -- 'Reporting.pathentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -- 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -- 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -- 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -- }, -- 'Reporting.performance': { -- 'Meta': {'object_name': 'Performance'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -- 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -- }, -- 'Reporting.serviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -- 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': 
('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -- } -- } -- -- complete_apps = ['Reporting'] -diff --git a/src/lib/Bcfg2/Reporting/migrations/0003_expand_hash_key.py b/src/lib/Bcfg2/Reporting/migrations/0003_expand_hash_key.py -deleted file mode 100644 -index 2da1fa7..0000000 ---- a/src/lib/Bcfg2/Reporting/migrations/0003_expand_hash_key.py -+++ /dev/null -@@ -1,180 +0,0 @@ --# -*- coding: utf-8 -*- --import datetime --from south.db import db --from south.v2 import SchemaMigration --from django.db import models -- -- --class Migration(SchemaMigration): -- -- def forwards(self, orm): -- -- # Changing field 'FailureEntry.hash_key' -- db.alter_column('Reporting_failureentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -- -- # Changing field 'PackageEntry.hash_key' -- db.alter_column('Reporting_packageentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -- -- # Changing field 'ServiceEntry.hash_key' -- db.alter_column('Reporting_serviceentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -- -- # Changing field 'PathEntry.hash_key' -- db.alter_column('Reporting_pathentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -- -- # Changing field 'ActionEntry.hash_key' -- db.alter_column('Reporting_actionentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -- -- def backwards(self, orm): -- -- # Changing field 'FailureEntry.hash_key' -- db.alter_column('Reporting_failureentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -- -- # Changing field 'PackageEntry.hash_key' -- db.alter_column('Reporting_packageentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -- -- # Changing field 'ServiceEntry.hash_key' -- db.alter_column('Reporting_serviceentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -- -- # Changing field 'PathEntry.hash_key' -- db.alter_column('Reporting_pathentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -- -- # Changing field 'ActionEntry.hash_key' -- db.alter_column('Reporting_actionentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -- -- models = { -- 'Reporting.actionentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -- }, -- 'Reporting.bundle': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -- }, -- 'Reporting.client': { -- 'Meta': {'object_name': 
'Client'}, -- 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -- 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -- 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.deviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_major': ('django.db.models.fields.IntegerField', [], {}), -- 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -- 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_major': ('django.db.models.fields.IntegerField', [], {}), -- 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.failureentry': { -- 'Meta': {'object_name': 'FailureEntry'}, -- 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'message': ('django.db.models.fields.TextField', [], {}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileacl': { -- 'Meta': {'object_name': 'FileAcl'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileperms': { -- 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.group': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -- 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -- }, -- 'Reporting.interaction': { -- 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -- 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -- 'bad_count': 
('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -- 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -- 'good_count': ('django.db.models.fields.IntegerField', [], {}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -- 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -- 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), -- 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -- 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -- 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -- 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -- 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -- 'total_count': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.linkentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -- }, -- 'Reporting.packageentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -- 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -- 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -- }, -- 'Reporting.pathentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -- 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -- 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -- 'detail_type': ('django.db.models.fields.IntegerField', 
[], {'default': '0'}), -- 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -- }, -- 'Reporting.performance': { -- 'Meta': {'object_name': 'Performance'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -- 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -- }, -- 'Reporting.serviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -- 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -- } -- } -- -- complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/migrations/0004_profile_can_be_null.py b/src/lib/Bcfg2/Reporting/migrations/0004_profile_can_be_null.py -deleted file mode 100644 -index 26a053b..0000000 ---- a/src/lib/Bcfg2/Reporting/migrations/0004_profile_can_be_null.py -+++ /dev/null -@@ -1,156 +0,0 @@ --# -*- coding: utf-8 -*- --import datetime --from south.db import db --from south.v2 import SchemaMigration --from django.db import models -- -- --class Migration(SchemaMigration): -- -- def forwards(self, orm): -- -- # Changing field 'Interaction.profile' -- db.alter_column('Reporting_interaction', 'profile_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['Reporting.Group'])) -- -- def backwards(self, orm): -- -- # User chose to not deal with backwards NULL issues for 'Interaction.profile' -- raise RuntimeError("Cannot reverse this migration. 
'Interaction.profile' and its values cannot be restored.") -- -- models = { -- 'Reporting.actionentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -- }, -- 'Reporting.bundle': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -- }, -- 'Reporting.client': { -- 'Meta': {'object_name': 'Client'}, -- 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -- 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -- 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.deviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_major': ('django.db.models.fields.IntegerField', [], {}), -- 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -- 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_major': ('django.db.models.fields.IntegerField', [], {}), -- 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.failureentry': { -- 'Meta': {'object_name': 'FailureEntry'}, -- 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'message': ('django.db.models.fields.TextField', [], {}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileacl': { -- 'Meta': {'object_name': 'FileAcl'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileperms': { -- 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.group': { -- 'Meta': {'ordering': "('name',)", 'object_name': 
'Group'}, -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -- 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -- }, -- 'Reporting.interaction': { -- 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -- 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -- 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -- 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -- 'good_count': ('django.db.models.fields.IntegerField', [], {}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -- 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -- 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), -- 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -- 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -- 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -- 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -- 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -- 'total_count': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.linkentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -- }, -- 'Reporting.packageentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -- 
'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -- 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -- }, -- 'Reporting.pathentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -- 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -- 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -- 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -- }, -- 'Reporting.performance': { -- 'Meta': {'object_name': 'Performance'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -- 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -- }, -- 'Reporting.serviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -- 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -- } -- } -- -- complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/migrations/0005_add_selinux_entry_support.py b/src/lib/Bcfg2/Reporting/migrations/0005_add_selinux_entry_support.py -deleted file mode 100644 -index d5f5d80..0000000 ---- a/src/lib/Bcfg2/Reporting/migrations/0005_add_selinux_entry_support.py -+++ /dev/null -@@ -1,485 +0,0 @@ --# -*- coding: utf-8 -*- --import datetime --from south.db import db --from south.v2 import SchemaMigration --from django.db import 
models -- -- --class Migration(SchemaMigration): -- -- def forwards(self, orm): -- # Adding model 'SELoginEntry' -- db.create_table('Reporting_seloginentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('selinuxuser', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_selinuxuser', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- )) -- db.send_create_signal('Reporting', ['SELoginEntry']) -- -- # Adding model 'SEUserEntry' -- db.create_table('Reporting_seuserentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('roles', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_roles', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- ('prefix', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_prefix', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- )) -- db.send_create_signal('Reporting', ['SEUserEntry']) -- -- # Adding model 'SEBooleanEntry' -- db.create_table('Reporting_sebooleanentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('value', self.gf('django.db.models.fields.BooleanField')(default=True)), -- )) -- db.send_create_signal('Reporting', ['SEBooleanEntry']) -- -- # Adding model 'SENodeEntry' -- db.create_table('Reporting_senodeentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- ('proto', self.gf('django.db.models.fields.CharField')(max_length=4)), -- )) -- db.send_create_signal('Reporting', ['SENodeEntry']) -- -- # Adding model 'SEFcontextEntry' -- db.create_table('Reporting_sefcontextentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', 
self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- ('filetype', self.gf('django.db.models.fields.CharField')(max_length=16)), -- )) -- db.send_create_signal('Reporting', ['SEFcontextEntry']) -- -- # Adding model 'SEInterfaceEntry' -- db.create_table('Reporting_seinterfaceentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- )) -- db.send_create_signal('Reporting', ['SEInterfaceEntry']) -- -- # Adding model 'SEPermissiveEntry' -- db.create_table('Reporting_sepermissiveentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- )) -- db.send_create_signal('Reporting', ['SEPermissiveEntry']) -- -- # Adding model 'SEModuleEntry' -- db.create_table('Reporting_semoduleentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('disabled', self.gf('django.db.models.fields.BooleanField')(default=False)), -- ('current_disabled', self.gf('django.db.models.fields.BooleanField')(default=False)), -- )) -- db.send_create_signal('Reporting', ['SEModuleEntry']) -- -- # Adding model 'SEPortEntry' -- db.create_table('Reporting_seportentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -- ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -- )) -- db.send_create_signal('Reporting', ['SEPortEntry']) -- -- # Adding M2M table for field sebooleans on 'Interaction' -- db.create_table('Reporting_interaction_sebooleans', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('sebooleanentry', models.ForeignKey(orm['Reporting.sebooleanentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_sebooleans', ['interaction_id', 
'sebooleanentry_id']) -- -- # Adding M2M table for field seports on 'Interaction' -- db.create_table('Reporting_interaction_seports', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('seportentry', models.ForeignKey(orm['Reporting.seportentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_seports', ['interaction_id', 'seportentry_id']) -- -- # Adding M2M table for field sefcontexts on 'Interaction' -- db.create_table('Reporting_interaction_sefcontexts', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('sefcontextentry', models.ForeignKey(orm['Reporting.sefcontextentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_sefcontexts', ['interaction_id', 'sefcontextentry_id']) -- -- # Adding M2M table for field senodes on 'Interaction' -- db.create_table('Reporting_interaction_senodes', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('senodeentry', models.ForeignKey(orm['Reporting.senodeentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_senodes', ['interaction_id', 'senodeentry_id']) -- -- # Adding M2M table for field selogins on 'Interaction' -- db.create_table('Reporting_interaction_selogins', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('seloginentry', models.ForeignKey(orm['Reporting.seloginentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_selogins', ['interaction_id', 'seloginentry_id']) -- -- # Adding M2M table for field seusers on 'Interaction' -- db.create_table('Reporting_interaction_seusers', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('seuserentry', models.ForeignKey(orm['Reporting.seuserentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_seusers', ['interaction_id', 'seuserentry_id']) -- -- # Adding M2M table for field seinterfaces on 'Interaction' -- db.create_table('Reporting_interaction_seinterfaces', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('seinterfaceentry', models.ForeignKey(orm['Reporting.seinterfaceentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_seinterfaces', ['interaction_id', 'seinterfaceentry_id']) -- -- # Adding M2M table for field sepermissives on 'Interaction' -- db.create_table('Reporting_interaction_sepermissives', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('sepermissiveentry', models.ForeignKey(orm['Reporting.sepermissiveentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_sepermissives', ['interaction_id', 'sepermissiveentry_id']) -- -- # Adding M2M table for field semodules on 'Interaction' -- db.create_table('Reporting_interaction_semodules', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', 
models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('semoduleentry', models.ForeignKey(orm['Reporting.semoduleentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_semodules', ['interaction_id', 'semoduleentry_id']) -- -- -- def backwards(self, orm): -- # Deleting model 'SELoginEntry' -- db.delete_table('Reporting_seloginentry') -- -- # Deleting model 'SEUserEntry' -- db.delete_table('Reporting_seuserentry') -- -- # Deleting model 'SEBooleanEntry' -- db.delete_table('Reporting_sebooleanentry') -- -- # Deleting model 'SENodeEntry' -- db.delete_table('Reporting_senodeentry') -- -- # Deleting model 'SEFcontextEntry' -- db.delete_table('Reporting_sefcontextentry') -- -- # Deleting model 'SEInterfaceEntry' -- db.delete_table('Reporting_seinterfaceentry') -- -- # Deleting model 'SEPermissiveEntry' -- db.delete_table('Reporting_sepermissiveentry') -- -- # Deleting model 'SEModuleEntry' -- db.delete_table('Reporting_semoduleentry') -- -- # Deleting model 'SEPortEntry' -- db.delete_table('Reporting_seportentry') -- -- # Removing M2M table for field sebooleans on 'Interaction' -- db.delete_table('Reporting_interaction_sebooleans') -- -- # Removing M2M table for field seports on 'Interaction' -- db.delete_table('Reporting_interaction_seports') -- -- # Removing M2M table for field sefcontexts on 'Interaction' -- db.delete_table('Reporting_interaction_sefcontexts') -- -- # Removing M2M table for field senodes on 'Interaction' -- db.delete_table('Reporting_interaction_senodes') -- -- # Removing M2M table for field selogins on 'Interaction' -- db.delete_table('Reporting_interaction_selogins') -- -- # Removing M2M table for field seusers on 'Interaction' -- db.delete_table('Reporting_interaction_seusers') -- -- # Removing M2M table for field seinterfaces on 'Interaction' -- db.delete_table('Reporting_interaction_seinterfaces') -- -- # Removing M2M table for field sepermissives on 'Interaction' -- db.delete_table('Reporting_interaction_sepermissives') -- -- # Removing M2M table for field semodules on 'Interaction' -- db.delete_table('Reporting_interaction_semodules') -- -- -- models = { -- 'Reporting.actionentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -- }, -- 'Reporting.bundle': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -- }, -- 'Reporting.client': { -- 'Meta': {'object_name': 'Client'}, -- 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -- 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -- 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -- 
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.deviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_major': ('django.db.models.fields.IntegerField', [], {}), -- 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -- 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_major': ('django.db.models.fields.IntegerField', [], {}), -- 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.failureentry': { -- 'Meta': {'object_name': 'FailureEntry'}, -- 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'message': ('django.db.models.fields.TextField', [], {}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileacl': { -- 'Meta': {'object_name': 'FileAcl'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileperms': { -- 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.group': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -- 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -- }, -- 'Reporting.interaction': { -- 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -- 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -- 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -- 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': 
'0'}), -- 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -- 'good_count': ('django.db.models.fields.IntegerField', [], {}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -- 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -- 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), -- 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -- 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), -- 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), -- 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), -- 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), -- 'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), -- 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), -- 'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), -- 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), -- 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -- 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -- 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), -- 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -- 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -- 'total_count': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.linkentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -- }, -- 'Reporting.packageentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -- 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], 
{'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -- 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -- }, -- 'Reporting.pathentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -- 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -- 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -- 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -- }, -- 'Reporting.performance': { -- 'Meta': {'object_name': 'Performance'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -- 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -- }, -- 'Reporting.sebooleanentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) -- }, -- 'Reporting.sefcontextentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.seinterfaceentry': { -- 'Meta': {'ordering': "('state', 
'name')", 'object_name': 'SEInterfaceEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.seloginentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, -- 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.semoduleentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, -- 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.senodeentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.sepermissiveentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.seportentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 
'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.serviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -- 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -- }, -- 'Reporting.seuserentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'}, -- 'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'roles': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- } -- } -- -- complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/migrations/0006_add_user_group_entry_support.py b/src/lib/Bcfg2/Reporting/migrations/0006_add_user_group_entry_support.py -deleted file mode 100644 -index d86e663..0000000 ---- a/src/lib/Bcfg2/Reporting/migrations/0006_add_user_group_entry_support.py -+++ /dev/null -@@ -1,340 +0,0 @@ --# -*- coding: utf-8 -*- --import datetime --from south.db import db --from south.v2 import SchemaMigration --from django.db import models -- -- --class Migration(SchemaMigration): -- -- def forwards(self, orm): -- # Adding model 'POSIXGroupEntry' -- db.create_table('Reporting_posixgroupentry', ( -- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('gid', self.gf('django.db.models.fields.IntegerField')(null=True)), -- ('current_gid', self.gf('django.db.models.fields.IntegerField')(null=True)), -- )) -- db.send_create_signal('Reporting', ['POSIXGroupEntry']) -- -- # Adding model 'POSIXUserEntry' -- db.create_table('Reporting_posixuserentry', ( -- ('id', 
self.gf('django.db.models.fields.AutoField')(primary_key=True)), -- ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -- ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -- ('state', self.gf('django.db.models.fields.IntegerField')()), -- ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -- ('uid', self.gf('django.db.models.fields.IntegerField')(null=True)), -- ('current_uid', self.gf('django.db.models.fields.IntegerField')(null=True)), -- ('group', self.gf('django.db.models.fields.CharField')(max_length=64)), -- ('current_group', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)), -- ('gecos', self.gf('django.db.models.fields.CharField')(max_length=1024)), -- ('current_gecos', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), -- ('home', self.gf('django.db.models.fields.CharField')(max_length=1024)), -- ('current_home', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), -- ('shell', self.gf('django.db.models.fields.CharField')(default='/bin/bash', max_length=1024)), -- ('current_shell', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), -- )) -- db.send_create_signal('Reporting', ['POSIXUserEntry']) -- -- # Adding M2M table for field posixusers on 'Interaction' -- db.create_table('Reporting_interaction_posixusers', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('posixuserentry', models.ForeignKey(orm['Reporting.posixuserentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_posixusers', ['interaction_id', 'posixuserentry_id']) -- -- # Adding M2M table for field posixgroups on 'Interaction' -- db.create_table('Reporting_interaction_posixgroups', ( -- ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -- ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -- ('posixgroupentry', models.ForeignKey(orm['Reporting.posixgroupentry'], null=False)) -- )) -- db.create_unique('Reporting_interaction_posixgroups', ['interaction_id', 'posixgroupentry_id']) -- -- -- def backwards(self, orm): -- # Deleting model 'POSIXGroupEntry' -- db.delete_table('Reporting_posixgroupentry') -- -- # Deleting model 'POSIXUserEntry' -- db.delete_table('Reporting_posixuserentry') -- -- # Removing M2M table for field posixusers on 'Interaction' -- db.delete_table('Reporting_interaction_posixusers') -- -- # Removing M2M table for field posixgroups on 'Interaction' -- db.delete_table('Reporting_interaction_posixgroups') -- -- -- models = { -- 'Reporting.actionentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -- }, -- 'Reporting.bundle': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -- 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -- }, -- 'Reporting.client': { -- 'Meta': {'object_name': 'Client'}, -- 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -- 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -- 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.deviceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_major': ('django.db.models.fields.IntegerField', [], {}), -- 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -- 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_major': ('django.db.models.fields.IntegerField', [], {}), -- 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.failureentry': { -- 'Meta': {'object_name': 'FailureEntry'}, -- 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'message': ('django.db.models.fields.TextField', [], {}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileacl': { -- 'Meta': {'object_name': 'FileAcl'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -- }, -- 'Reporting.fileperms': { -- 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -- }, -- 'Reporting.group': { -- 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -- 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -- }, -- 'Reporting.interaction': { -- 'Meta': {'ordering': "['-timestamp']", 
'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -- 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -- 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -- 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -- 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -- 'good_count': ('django.db.models.fields.IntegerField', [], {}), -- 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -- 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -- 'posixgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXGroupEntry']", 'symmetrical': 'False'}), -- 'posixusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXUserEntry']", 'symmetrical': 'False'}), -- 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), -- 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -- 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), -- 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), -- 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), -- 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), -- 'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), -- 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), -- 'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), -- 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), -- 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -- 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -- 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), -- 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -- 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -- 
'total_count': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.linkentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -- 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -- 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -- 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -- }, -- 'Reporting.packageentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -- 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -- 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -- }, -- 'Reporting.pathentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -- 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -- 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -- 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -- 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -- }, -- 'Reporting.performance': { -- 'Meta': {'object_name': 'Performance'}, -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -- 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -- }, -- 'Reporting.posixgroupentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXGroupEntry'}, -- 'current_gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': 
('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.posixuserentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXUserEntry'}, -- 'current_gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), -- 'current_group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), -- 'current_home': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), -- 'current_shell': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), -- 'current_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'group': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'home': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'shell': ('django.db.models.fields.CharField', [], {'default': "'/bin/bash'", 'max_length': '1024'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) -- }, -- 'Reporting.sebooleanentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}), -- 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) -- }, -- 'Reporting.sefcontextentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.seinterfaceentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEInterfaceEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxtype': 
('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.seloginentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, -- 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.semoduleentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, -- 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.senodeentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.sepermissiveentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 'Reporting.seportentry': { -- 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, -- 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -- 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -- 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -- 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -- 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -- 'state': ('django.db.models.fields.IntegerField', [], {}) -- }, -- 
'Reporting.serviceentry': {
--            'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'},
--            'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
--            'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
--            'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
--            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
--            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
--            'state': ('django.db.models.fields.IntegerField', [], {}),
--            'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'})
--        },
--        'Reporting.seuserentry': {
--            'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'},
--            'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
--            'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
--            'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
--            'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
--            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
--            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
--            'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
--            'roles': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
--            'state': ('django.db.models.fields.IntegerField', [], {})
--        }
--    }
--
--    complete_apps = ['Reporting']
-\ No newline at end of file
-diff --git a/src/lib/Bcfg2/Reporting/south_migrations/0001_initial.py b/src/lib/Bcfg2/Reporting/south_migrations/0001_initial.py
-new file mode 100644
-index 0000000..609290e
---- /dev/null
-+++ b/src/lib/Bcfg2/Reporting/south_migrations/0001_initial.py
-@@ -0,0 +1,465 @@
-+# -*- coding: utf-8 -*-
-+import datetime
-+from south.db import db
-+from south.v2 import SchemaMigration
-+from django.db import models
-+
-+
-+class Migration(SchemaMigration):
-+
-+    def forwards(self, orm):
-+        # Adding model 'Client'
-+        db.create_table('Reporting_client', (
-+            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
-+            ('creation', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
-+            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
-+            ('current_interaction', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent_client', null=True, to=orm['Reporting.Interaction'])),
-+            ('expiration', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
-+        ))
-+        db.send_create_signal('Reporting', ['Client'])
-+
-+        # Adding model 'Interaction'
-+        db.create_table('Reporting_interaction', (
-+            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
-+            ('client', self.gf('django.db.models.fields.related.ForeignKey')(related_name='interactions', to=orm['Reporting.Client'])),
-+            ('timestamp', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
-+            ('state', self.gf('django.db.models.fields.CharField')(max_length=32)),
-+            ('repo_rev_code', self.gf('django.db.models.fields.CharField')(max_length=64)),
-+            ('server', self.gf('django.db.models.fields.CharField')(max_length=256)),
-+            ('good_count', self.gf('django.db.models.fields.IntegerField')()),
-+            ('total_count', self.gf('django.db.models.fields.IntegerField')()),
-+            ('bad_count',
self.gf('django.db.models.fields.IntegerField')(default=0)), -+ ('modified_count', self.gf('django.db.models.fields.IntegerField')(default=0)), -+ ('extra_count', self.gf('django.db.models.fields.IntegerField')(default=0)), -+ ('profile', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.Group'])), -+ )) -+ db.send_create_signal('Reporting', ['Interaction']) -+ -+ # Adding unique constraint on 'Interaction', fields ['client', 'timestamp'] -+ db.create_unique('Reporting_interaction', ['client_id', 'timestamp']) -+ -+ # Adding M2M table for field actions on 'Interaction' -+ db.create_table('Reporting_interaction_actions', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('actionentry', models.ForeignKey(orm['Reporting.actionentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_actions', ['interaction_id', 'actionentry_id']) -+ -+ # Adding M2M table for field packages on 'Interaction' -+ db.create_table('Reporting_interaction_packages', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('packageentry', models.ForeignKey(orm['Reporting.packageentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_packages', ['interaction_id', 'packageentry_id']) -+ -+ # Adding M2M table for field paths on 'Interaction' -+ db.create_table('Reporting_interaction_paths', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('pathentry', models.ForeignKey(orm['Reporting.pathentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_paths', ['interaction_id', 'pathentry_id']) -+ -+ # Adding M2M table for field services on 'Interaction' -+ db.create_table('Reporting_interaction_services', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('serviceentry', models.ForeignKey(orm['Reporting.serviceentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_services', ['interaction_id', 'serviceentry_id']) -+ -+ # Adding M2M table for field failures on 'Interaction' -+ db.create_table('Reporting_interaction_failures', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('failureentry', models.ForeignKey(orm['Reporting.failureentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_failures', ['interaction_id', 'failureentry_id']) -+ -+ # Adding M2M table for field groups on 'Interaction' -+ db.create_table('Reporting_interaction_groups', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('group', models.ForeignKey(orm['Reporting.group'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_groups', ['interaction_id', 'group_id']) -+ -+ # Adding M2M table for field bundles on 'Interaction' -+ db.create_table('Reporting_interaction_bundles', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], 
null=False)), -+ ('bundle', models.ForeignKey(orm['Reporting.bundle'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_bundles', ['interaction_id', 'bundle_id']) -+ -+ # Adding model 'Performance' -+ db.create_table('Reporting_performance', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('interaction', self.gf('django.db.models.fields.related.ForeignKey')(related_name='performance_items', to=orm['Reporting.Interaction'])), -+ ('metric', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('value', self.gf('django.db.models.fields.DecimalField')(max_digits=32, decimal_places=16)), -+ )) -+ db.send_create_signal('Reporting', ['Performance']) -+ -+ # Adding model 'Group' -+ db.create_table('Reporting_group', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), -+ ('profile', self.gf('django.db.models.fields.BooleanField')(default=False)), -+ ('public', self.gf('django.db.models.fields.BooleanField')(default=False)), -+ ('category', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), -+ ('comment', self.gf('django.db.models.fields.TextField')(blank=True)), -+ )) -+ db.send_create_signal('Reporting', ['Group']) -+ -+ # Adding M2M table for field groups on 'Group' -+ db.create_table('Reporting_group_groups', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('from_group', models.ForeignKey(orm['Reporting.group'], null=False)), -+ ('to_group', models.ForeignKey(orm['Reporting.group'], null=False)) -+ )) -+ db.create_unique('Reporting_group_groups', ['from_group_id', 'to_group_id']) -+ -+ # Adding M2M table for field bundles on 'Group' -+ db.create_table('Reporting_group_bundles', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('group', models.ForeignKey(orm['Reporting.group'], null=False)), -+ ('bundle', models.ForeignKey(orm['Reporting.bundle'], null=False)) -+ )) -+ db.create_unique('Reporting_group_bundles', ['group_id', 'bundle_id']) -+ -+ # Adding model 'Bundle' -+ db.create_table('Reporting_bundle', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), -+ )) -+ db.send_create_signal('Reporting', ['Bundle']) -+ -+ # Adding model 'FilePerms' -+ db.create_table('Reporting_fileperms', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('owner', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('group', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('perms', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ )) -+ db.send_create_signal('Reporting', ['FilePerms']) -+ -+ # Adding unique constraint on 'FilePerms', fields ['owner', 'group', 'perms'] -+ db.create_unique('Reporting_fileperms', ['owner', 'group', 'perms']) -+ -+ # Adding model 'FileAcl' -+ db.create_table('Reporting_fileacl', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ )) -+ db.send_create_signal('Reporting', ['FileAcl']) -+ -+ # Adding model 'FailureEntry' -+ db.create_table('Reporting_failureentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, 
db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -+ ('entry_type', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('message', self.gf('django.db.models.fields.TextField')()), -+ )) -+ db.send_create_signal('Reporting', ['FailureEntry']) -+ -+ # Adding model 'ActionEntry' -+ db.create_table('Reporting_actionentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('status', self.gf('django.db.models.fields.CharField')(default='check', max_length=128)), -+ ('output', self.gf('django.db.models.fields.IntegerField')(default=0)), -+ )) -+ db.send_create_signal('Reporting', ['ActionEntry']) -+ -+ # Adding model 'PackageEntry' -+ db.create_table('Reporting_packageentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('target_version', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)), -+ ('current_version', self.gf('django.db.models.fields.CharField')(max_length=1024)), -+ ('verification_details', self.gf('django.db.models.fields.TextField')(default='')), -+ )) -+ db.send_create_signal('Reporting', ['PackageEntry']) -+ -+ # Adding model 'PathEntry' -+ db.create_table('Reporting_pathentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('path_type', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('target_perms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.FilePerms'])), -+ ('current_perms', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['Reporting.FilePerms'])), -+ ('detail_type', self.gf('django.db.models.fields.IntegerField')(default=0)), -+ ('details', self.gf('django.db.models.fields.TextField')(default='')), -+ )) -+ db.send_create_signal('Reporting', ['PathEntry']) -+ -+ # Adding M2M table for field acls on 'PathEntry' -+ db.create_table('Reporting_pathentry_acls', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('pathentry', models.ForeignKey(orm['Reporting.pathentry'], null=False)), -+ ('fileacl', models.ForeignKey(orm['Reporting.fileacl'], null=False)) -+ )) -+ db.create_unique('Reporting_pathentry_acls', ['pathentry_id', 'fileacl_id']) -+ -+ # Adding model 'LinkEntry' -+ db.create_table('Reporting_linkentry', ( -+ ('pathentry_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['Reporting.PathEntry'], unique=True, primary_key=True)), -+ ('target_path', self.gf('django.db.models.fields.CharField')(max_length=1024, 
blank=True)), -+ ('current_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), -+ )) -+ db.send_create_signal('Reporting', ['LinkEntry']) -+ -+ # Adding model 'DeviceEntry' -+ db.create_table('Reporting_deviceentry', ( -+ ('pathentry_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['Reporting.PathEntry'], unique=True, primary_key=True)), -+ ('device_type', self.gf('django.db.models.fields.CharField')(max_length=16)), -+ ('target_major', self.gf('django.db.models.fields.IntegerField')()), -+ ('target_minor', self.gf('django.db.models.fields.IntegerField')()), -+ ('current_major', self.gf('django.db.models.fields.IntegerField')()), -+ ('current_minor', self.gf('django.db.models.fields.IntegerField')()), -+ )) -+ db.send_create_signal('Reporting', ['DeviceEntry']) -+ -+ # Adding model 'ServiceEntry' -+ db.create_table('Reporting_serviceentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.IntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('target_status', self.gf('django.db.models.fields.CharField')(default='', max_length=128)), -+ ('current_status', self.gf('django.db.models.fields.CharField')(default='', max_length=128)), -+ )) -+ db.send_create_signal('Reporting', ['ServiceEntry']) -+ -+ -+ def backwards(self, orm): -+ # Removing unique constraint on 'FilePerms', fields ['owner', 'group', 'perms'] -+ db.delete_unique('Reporting_fileperms', ['owner', 'group', 'perms']) -+ -+ # Removing unique constraint on 'Interaction', fields ['client', 'timestamp'] -+ db.delete_unique('Reporting_interaction', ['client_id', 'timestamp']) -+ -+ # Deleting model 'Client' -+ db.delete_table('Reporting_client') -+ -+ # Deleting model 'Interaction' -+ db.delete_table('Reporting_interaction') -+ -+ # Removing M2M table for field actions on 'Interaction' -+ db.delete_table('Reporting_interaction_actions') -+ -+ # Removing M2M table for field packages on 'Interaction' -+ db.delete_table('Reporting_interaction_packages') -+ -+ # Removing M2M table for field paths on 'Interaction' -+ db.delete_table('Reporting_interaction_paths') -+ -+ # Removing M2M table for field services on 'Interaction' -+ db.delete_table('Reporting_interaction_services') -+ -+ # Removing M2M table for field failures on 'Interaction' -+ db.delete_table('Reporting_interaction_failures') -+ -+ # Removing M2M table for field groups on 'Interaction' -+ db.delete_table('Reporting_interaction_groups') -+ -+ # Removing M2M table for field bundles on 'Interaction' -+ db.delete_table('Reporting_interaction_bundles') -+ -+ # Deleting model 'Performance' -+ db.delete_table('Reporting_performance') -+ -+ # Deleting model 'Group' -+ db.delete_table('Reporting_group') -+ -+ # Removing M2M table for field groups on 'Group' -+ db.delete_table('Reporting_group_groups') -+ -+ # Removing M2M table for field bundles on 'Group' -+ db.delete_table('Reporting_group_bundles') -+ -+ # Deleting model 'Bundle' -+ db.delete_table('Reporting_bundle') -+ -+ # Deleting model 'FilePerms' -+ db.delete_table('Reporting_fileperms') -+ -+ # Deleting model 'FileAcl' -+ db.delete_table('Reporting_fileacl') -+ -+ # Deleting model 'FailureEntry' -+ db.delete_table('Reporting_failureentry') -+ -+ # Deleting model 'ActionEntry' -+ 
db.delete_table('Reporting_actionentry') -+ -+ # Deleting model 'PackageEntry' -+ db.delete_table('Reporting_packageentry') -+ -+ # Deleting model 'PathEntry' -+ db.delete_table('Reporting_pathentry') -+ -+ # Removing M2M table for field acls on 'PathEntry' -+ db.delete_table('Reporting_pathentry_acls') -+ -+ # Deleting model 'LinkEntry' -+ db.delete_table('Reporting_linkentry') -+ -+ # Deleting model 'DeviceEntry' -+ db.delete_table('Reporting_deviceentry') -+ -+ # Deleting model 'ServiceEntry' -+ db.delete_table('Reporting_serviceentry') -+ -+ -+ models = { -+ 'Reporting.actionentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -+ }, -+ 'Reporting.bundle': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -+ }, -+ 'Reporting.client': { -+ 'Meta': {'object_name': 'Client'}, -+ 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -+ 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -+ 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.deviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -+ 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.failureentry': { -+ 'Meta': {'object_name': 'FailureEntry'}, -+ 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'message': ('django.db.models.fields.TextField', [], {}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileacl': { -+ 'Meta': {'object_name': 'FileAcl'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileperms': { -+ 
'Meta': {'unique_together': "(('owner', 'group', 'perms'),)", 'object_name': 'FilePerms'}, -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'perms': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.group': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -+ 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -+ }, -+ 'Reporting.interaction': { -+ 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -+ 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -+ 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -+ 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -+ 'good_count': ('django.db.models.fields.IntegerField', [], {}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -+ 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -+ 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), -+ 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -+ 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -+ 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -+ 'total_count': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.linkentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 
'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -+ }, -+ 'Reporting.packageentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -+ 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -+ 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -+ }, -+ 'Reporting.pathentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -+ 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -+ 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -+ 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -+ }, -+ 'Reporting.performance': { -+ 'Meta': {'object_name': 'Performance'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -+ 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -+ }, -+ 'Reporting.serviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -+ 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -+ } -+ } -+ -+ complete_apps = ['Reporting'] -\ No newline 
at end of file
-diff --git a/src/lib/Bcfg2/Reporting/south_migrations/0002_convert_perms_to_mode.py b/src/lib/Bcfg2/Reporting/south_migrations/0002_convert_perms_to_mode.py
-new file mode 100644
-index 0000000..668094c
---- /dev/null
-+++ b/src/lib/Bcfg2/Reporting/south_migrations/0002_convert_perms_to_mode.py
-@@ -0,0 +1,171 @@
-+# -*- coding: utf-8 -*-
-+import datetime
-+from south.db import db
-+from south.v2 import SchemaMigration
-+from django.db import models
-+
-+from Bcfg2 import settings
-+
-+class Migration(SchemaMigration):
-+
-+    def forwards(self, orm):
-+        # Removing unique constraint on 'FilePerms', fields ['owner', 'perms', 'group']
-+        db.delete_unique('Reporting_fileperms', ['owner', 'perms', 'group'])
-+
-+        # Renaming field 'FilePerms.perms' to 'FilePerms.mode'
-+        db.rename_column('Reporting_fileperms', 'perms', 'mode')
-+
-+        if not settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
-+            # Adding unique constraint on 'FilePerms', fields ['owner', 'group', 'mode']
-+            db.create_unique('Reporting_fileperms', ['owner', 'group', 'mode'])
-+
-+
-+    def backwards(self, orm):
-+        # Removing unique constraint on 'FilePerms', fields ['owner', 'group', 'mode']
-+        db.delete_unique('Reporting_fileperms', ['owner', 'group', 'mode'])
-+
-+        # Renaming field 'FilePerms.mode' to 'FilePerms.perms'
-+        db.rename_column('Reporting_fileperms', 'mode', 'perms')
-+
-+        if not settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
-+            # Adding unique constraint on 'FilePerms', fields ['owner', 'perms', 'group']
-+            db.create_unique('Reporting_fileperms', ['owner', 'perms', 'group'])
-+
-+
-+    models = {
-+        'Reporting.actionentry': {
-+            'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'},
-+            'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
-+            'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
-+            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
-+            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
-+            'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
-+            'state': ('django.db.models.fields.IntegerField', [], {}),
-+            'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'})
-+        },
-+        'Reporting.bundle': {
-+            'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'},
-+            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
-+            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
-+        },
-+        'Reporting.client': {
-+            'Meta': {'object_name': 'Client'},
-+            'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
-+            'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}),
-+            'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
-+            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
-+            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
-+        },
-+        'Reporting.deviceentry': {
-+            'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']},
-+            'current_major': ('django.db.models.fields.IntegerField', [], {}),
-+            'current_minor': ('django.db.models.fields.IntegerField', [], {}),
-+            'device_type':
('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.failureentry': { -+ 'Meta': {'object_name': 'FailureEntry'}, -+ 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'message': ('django.db.models.fields.TextField', [], {}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileacl': { -+ 'Meta': {'object_name': 'FileAcl'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileperms': { -+ 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.group': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -+ 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -+ }, -+ 'Reporting.interaction': { -+ 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -+ 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -+ 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -+ 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -+ 'good_count': ('django.db.models.fields.IntegerField', [], {}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'modified_count': 
('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -+ 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -+ 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), -+ 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -+ 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -+ 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -+ 'total_count': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.linkentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -+ }, -+ 'Reporting.packageentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -+ 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -+ 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -+ }, -+ 'Reporting.pathentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -+ 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -+ 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -+ 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -+ }, -+ 'Reporting.performance': { -+ 'Meta': {'object_name': 'Performance'}, -+ 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), -+ 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -+ 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -+ }, -+ 'Reporting.serviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -+ 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -+ } -+ } -+ -+ complete_apps = ['Reporting'] -diff --git a/src/lib/Bcfg2/Reporting/south_migrations/0003_expand_hash_key.py b/src/lib/Bcfg2/Reporting/south_migrations/0003_expand_hash_key.py -new file mode 100644 -index 0000000..2da1fa7 ---- /dev/null -+++ b/src/lib/Bcfg2/Reporting/south_migrations/0003_expand_hash_key.py -@@ -0,0 +1,180 @@ -+# -*- coding: utf-8 -*- -+import datetime -+from south.db import db -+from south.v2 import SchemaMigration -+from django.db import models -+ -+ -+class Migration(SchemaMigration): -+ -+ def forwards(self, orm): -+ -+ # Changing field 'FailureEntry.hash_key' -+ db.alter_column('Reporting_failureentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -+ -+ # Changing field 'PackageEntry.hash_key' -+ db.alter_column('Reporting_packageentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -+ -+ # Changing field 'ServiceEntry.hash_key' -+ db.alter_column('Reporting_serviceentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -+ -+ # Changing field 'PathEntry.hash_key' -+ db.alter_column('Reporting_pathentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -+ -+ # Changing field 'ActionEntry.hash_key' -+ db.alter_column('Reporting_actionentry', 'hash_key', self.gf('django.db.models.fields.BigIntegerField')()) -+ -+ def backwards(self, orm): -+ -+ # Changing field 'FailureEntry.hash_key' -+ db.alter_column('Reporting_failureentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -+ -+ # Changing field 'PackageEntry.hash_key' -+ db.alter_column('Reporting_packageentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -+ -+ # Changing field 'ServiceEntry.hash_key' -+ db.alter_column('Reporting_serviceentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -+ -+ # Changing field 'PathEntry.hash_key' -+ db.alter_column('Reporting_pathentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -+ -+ # Changing field 'ActionEntry.hash_key' -+ db.alter_column('Reporting_actionentry', 'hash_key', self.gf('django.db.models.fields.IntegerField')()) -+ -+ models = { -+ 'Reporting.actionentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -+ }, -+ 'Reporting.bundle': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -+ }, -+ 'Reporting.client': { -+ 'Meta': {'object_name': 'Client'}, -+ 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -+ 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -+ 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.deviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -+ 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.failureentry': { -+ 'Meta': {'object_name': 'FailureEntry'}, -+ 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'message': ('django.db.models.fields.TextField', [], {}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileacl': { -+ 'Meta': {'object_name': 'FileAcl'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileperms': { -+ 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.group': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -+ 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -+ }, -+ 'Reporting.interaction': { -+ 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -+ 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -+ 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -+ 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -+ 'good_count': ('django.db.models.fields.IntegerField', [], {}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -+ 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -+ 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.Group']"}), -+ 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -+ 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -+ 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -+ 'total_count': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.linkentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -+ }, -+ 'Reporting.packageentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -+ 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 
'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -+ 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -+ }, -+ 'Reporting.pathentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -+ 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -+ 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -+ 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -+ }, -+ 'Reporting.performance': { -+ 'Meta': {'object_name': 'Performance'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -+ 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -+ }, -+ 'Reporting.serviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -+ 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -+ } -+ } -+ -+ complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/south_migrations/0004_profile_can_be_null.py b/src/lib/Bcfg2/Reporting/south_migrations/0004_profile_can_be_null.py -new file mode 100644 -index 0000000..26a053b ---- /dev/null -+++ b/src/lib/Bcfg2/Reporting/south_migrations/0004_profile_can_be_null.py -@@ -0,0 +1,156 @@ -+# -*- coding: utf-8 -*- -+import datetime -+from south.db import db -+from south.v2 import SchemaMigration -+from django.db import models -+ -+ -+class Migration(SchemaMigration): -+ -+ def forwards(self, orm): -+ -+ # Changing field 'Interaction.profile' -+ db.alter_column('Reporting_interaction', 'profile_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['Reporting.Group'])) -+ -+ def backwards(self, orm): -+ -+ # User chose to not deal with backwards NULL issues for 'Interaction.profile' -+ raise 
RuntimeError("Cannot reverse this migration. 'Interaction.profile' and its values cannot be restored.") -+ -+ models = { -+ 'Reporting.actionentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -+ }, -+ 'Reporting.bundle': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -+ }, -+ 'Reporting.client': { -+ 'Meta': {'object_name': 'Client'}, -+ 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -+ 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -+ 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.deviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -+ 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.failureentry': { -+ 'Meta': {'object_name': 'FailureEntry'}, -+ 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'message': ('django.db.models.fields.TextField', [], {}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileacl': { -+ 'Meta': {'object_name': 'FileAcl'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileperms': { -+ 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.group': { -+ 'Meta': 
{'ordering': "('name',)", 'object_name': 'Group'}, -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -+ 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -+ }, -+ 'Reporting.interaction': { -+ 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -+ 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -+ 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -+ 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -+ 'good_count': ('django.db.models.fields.IntegerField', [], {}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -+ 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -+ 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), -+ 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -+ 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -+ 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -+ 'total_count': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.linkentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -+ }, -+ 'Reporting.packageentry': { -+ 'Meta': {'ordering': "('state', 'name')", 
'object_name': 'PackageEntry'}, -+ 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -+ 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -+ }, -+ 'Reporting.pathentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -+ 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -+ 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -+ 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -+ }, -+ 'Reporting.performance': { -+ 'Meta': {'object_name': 'Performance'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -+ 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -+ }, -+ 'Reporting.serviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -+ 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -+ } -+ } -+ -+ complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/south_migrations/0005_add_selinux_entry_support.py b/src/lib/Bcfg2/Reporting/south_migrations/0005_add_selinux_entry_support.py -new file mode 100644 -index 0000000..d5f5d80 ---- /dev/null -+++ b/src/lib/Bcfg2/Reporting/south_migrations/0005_add_selinux_entry_support.py -@@ -0,0 +1,485 @@ -+# -*- coding: utf-8 -*- -+import datetime -+from south.db import db -+from 
south.v2 import SchemaMigration -+from django.db import models -+ -+ -+class Migration(SchemaMigration): -+ -+ def forwards(self, orm): -+ # Adding model 'SELoginEntry' -+ db.create_table('Reporting_seloginentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('selinuxuser', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_selinuxuser', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ )) -+ db.send_create_signal('Reporting', ['SELoginEntry']) -+ -+ # Adding model 'SEUserEntry' -+ db.create_table('Reporting_seuserentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('roles', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_roles', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ ('prefix', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_prefix', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ )) -+ db.send_create_signal('Reporting', ['SEUserEntry']) -+ -+ # Adding model 'SEBooleanEntry' -+ db.create_table('Reporting_sebooleanentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('value', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ )) -+ db.send_create_signal('Reporting', ['SEBooleanEntry']) -+ -+ # Adding model 'SENodeEntry' -+ db.create_table('Reporting_senodeentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ ('proto', self.gf('django.db.models.fields.CharField')(max_length=4)), -+ )) -+ db.send_create_signal('Reporting', ['SENodeEntry']) -+ -+ # Adding model 'SEFcontextEntry' -+ db.create_table('Reporting_sefcontextentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ 
('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ ('filetype', self.gf('django.db.models.fields.CharField')(max_length=16)), -+ )) -+ db.send_create_signal('Reporting', ['SEFcontextEntry']) -+ -+ # Adding model 'SEInterfaceEntry' -+ db.create_table('Reporting_seinterfaceentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ )) -+ db.send_create_signal('Reporting', ['SEInterfaceEntry']) -+ -+ # Adding model 'SEPermissiveEntry' -+ db.create_table('Reporting_sepermissiveentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ )) -+ db.send_create_signal('Reporting', ['SEPermissiveEntry']) -+ -+ # Adding model 'SEModuleEntry' -+ db.create_table('Reporting_semoduleentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('disabled', self.gf('django.db.models.fields.BooleanField')(default=False)), -+ ('current_disabled', self.gf('django.db.models.fields.BooleanField')(default=False)), -+ )) -+ db.send_create_signal('Reporting', ['SEModuleEntry']) -+ -+ # Adding model 'SEPortEntry' -+ db.create_table('Reporting_seportentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128)), -+ ('current_selinuxtype', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)), -+ )) -+ db.send_create_signal('Reporting', ['SEPortEntry']) -+ -+ # Adding M2M table for field sebooleans on 'Interaction' -+ db.create_table('Reporting_interaction_sebooleans', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('sebooleanentry', models.ForeignKey(orm['Reporting.sebooleanentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_sebooleans', 
['interaction_id', 'sebooleanentry_id']) -+ -+ # Adding M2M table for field seports on 'Interaction' -+ db.create_table('Reporting_interaction_seports', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('seportentry', models.ForeignKey(orm['Reporting.seportentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_seports', ['interaction_id', 'seportentry_id']) -+ -+ # Adding M2M table for field sefcontexts on 'Interaction' -+ db.create_table('Reporting_interaction_sefcontexts', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('sefcontextentry', models.ForeignKey(orm['Reporting.sefcontextentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_sefcontexts', ['interaction_id', 'sefcontextentry_id']) -+ -+ # Adding M2M table for field senodes on 'Interaction' -+ db.create_table('Reporting_interaction_senodes', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('senodeentry', models.ForeignKey(orm['Reporting.senodeentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_senodes', ['interaction_id', 'senodeentry_id']) -+ -+ # Adding M2M table for field selogins on 'Interaction' -+ db.create_table('Reporting_interaction_selogins', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('seloginentry', models.ForeignKey(orm['Reporting.seloginentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_selogins', ['interaction_id', 'seloginentry_id']) -+ -+ # Adding M2M table for field seusers on 'Interaction' -+ db.create_table('Reporting_interaction_seusers', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('seuserentry', models.ForeignKey(orm['Reporting.seuserentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_seusers', ['interaction_id', 'seuserentry_id']) -+ -+ # Adding M2M table for field seinterfaces on 'Interaction' -+ db.create_table('Reporting_interaction_seinterfaces', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('seinterfaceentry', models.ForeignKey(orm['Reporting.seinterfaceentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_seinterfaces', ['interaction_id', 'seinterfaceentry_id']) -+ -+ # Adding M2M table for field sepermissives on 'Interaction' -+ db.create_table('Reporting_interaction_sepermissives', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('sepermissiveentry', models.ForeignKey(orm['Reporting.sepermissiveentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_sepermissives', ['interaction_id', 'sepermissiveentry_id']) -+ -+ # Adding M2M table for field semodules on 'Interaction' -+ db.create_table('Reporting_interaction_semodules', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ 
('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('semoduleentry', models.ForeignKey(orm['Reporting.semoduleentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_semodules', ['interaction_id', 'semoduleentry_id']) -+ -+ -+ def backwards(self, orm): -+ # Deleting model 'SELoginEntry' -+ db.delete_table('Reporting_seloginentry') -+ -+ # Deleting model 'SEUserEntry' -+ db.delete_table('Reporting_seuserentry') -+ -+ # Deleting model 'SEBooleanEntry' -+ db.delete_table('Reporting_sebooleanentry') -+ -+ # Deleting model 'SENodeEntry' -+ db.delete_table('Reporting_senodeentry') -+ -+ # Deleting model 'SEFcontextEntry' -+ db.delete_table('Reporting_sefcontextentry') -+ -+ # Deleting model 'SEInterfaceEntry' -+ db.delete_table('Reporting_seinterfaceentry') -+ -+ # Deleting model 'SEPermissiveEntry' -+ db.delete_table('Reporting_sepermissiveentry') -+ -+ # Deleting model 'SEModuleEntry' -+ db.delete_table('Reporting_semoduleentry') -+ -+ # Deleting model 'SEPortEntry' -+ db.delete_table('Reporting_seportentry') -+ -+ # Removing M2M table for field sebooleans on 'Interaction' -+ db.delete_table('Reporting_interaction_sebooleans') -+ -+ # Removing M2M table for field seports on 'Interaction' -+ db.delete_table('Reporting_interaction_seports') -+ -+ # Removing M2M table for field sefcontexts on 'Interaction' -+ db.delete_table('Reporting_interaction_sefcontexts') -+ -+ # Removing M2M table for field senodes on 'Interaction' -+ db.delete_table('Reporting_interaction_senodes') -+ -+ # Removing M2M table for field selogins on 'Interaction' -+ db.delete_table('Reporting_interaction_selogins') -+ -+ # Removing M2M table for field seusers on 'Interaction' -+ db.delete_table('Reporting_interaction_seusers') -+ -+ # Removing M2M table for field seinterfaces on 'Interaction' -+ db.delete_table('Reporting_interaction_seinterfaces') -+ -+ # Removing M2M table for field sepermissives on 'Interaction' -+ db.delete_table('Reporting_interaction_sepermissives') -+ -+ # Removing M2M table for field semodules on 'Interaction' -+ db.delete_table('Reporting_interaction_semodules') -+ -+ -+ models = { -+ 'Reporting.actionentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -+ }, -+ 'Reporting.bundle': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -+ }, -+ 'Reporting.client': { -+ 'Meta': {'object_name': 'Client'}, -+ 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -+ 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -+ 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 
'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.deviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -+ 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.failureentry': { -+ 'Meta': {'object_name': 'FailureEntry'}, -+ 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'message': ('django.db.models.fields.TextField', [], {}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileacl': { -+ 'Meta': {'object_name': 'FileAcl'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileperms': { -+ 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.group': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -+ 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -+ }, -+ 'Reporting.interaction': { -+ 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -+ 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -+ 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -+ 'extra_count': ('django.db.models.fields.IntegerField', [], 
{'default': '0'}), -+ 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -+ 'good_count': ('django.db.models.fields.IntegerField', [], {}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -+ 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -+ 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), -+ 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), -+ 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), -+ 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), -+ 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), -+ 'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), -+ 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), -+ 'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), -+ 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), -+ 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -+ 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -+ 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), -+ 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -+ 'total_count': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.linkentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -+ }, -+ 'Reporting.packageentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -+ 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', 
[], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -+ 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -+ }, -+ 'Reporting.pathentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -+ 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -+ 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -+ 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -+ }, -+ 'Reporting.performance': { -+ 'Meta': {'object_name': 'Performance'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -+ 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -+ }, -+ 'Reporting.sebooleanentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) -+ }, -+ 'Reporting.sefcontextentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.seinterfaceentry': { -+ 'Meta': {'ordering': "('state', 
'name')", 'object_name': 'SEInterfaceEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.seloginentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, -+ 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.semoduleentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, -+ 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.senodeentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.sepermissiveentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.seportentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 
'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.serviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -+ 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -+ }, -+ 'Reporting.seuserentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'}, -+ 'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'roles': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ } -+ } -+ -+ complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/south_migrations/0006_add_user_group_entry_support.py b/src/lib/Bcfg2/Reporting/south_migrations/0006_add_user_group_entry_support.py -new file mode 100644 -index 0000000..d86e663 ---- /dev/null -+++ b/src/lib/Bcfg2/Reporting/south_migrations/0006_add_user_group_entry_support.py -@@ -0,0 +1,340 @@ -+# -*- coding: utf-8 -*- -+import datetime -+from south.db import db -+from south.v2 import SchemaMigration -+from django.db import models -+ -+ -+class Migration(SchemaMigration): -+ -+ def forwards(self, orm): -+ # Adding model 'POSIXGroupEntry' -+ db.create_table('Reporting_posixgroupentry', ( -+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('gid', self.gf('django.db.models.fields.IntegerField')(null=True)), -+ ('current_gid', self.gf('django.db.models.fields.IntegerField')(null=True)), -+ )) -+ db.send_create_signal('Reporting', ['POSIXGroupEntry']) -+ -+ # Adding model 'POSIXUserEntry' -+ db.create_table('Reporting_posixuserentry', ( -+ ('id', 
self.gf('django.db.models.fields.AutoField')(primary_key=True)), -+ ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), -+ ('hash_key', self.gf('django.db.models.fields.BigIntegerField')(db_index=True)), -+ ('state', self.gf('django.db.models.fields.IntegerField')()), -+ ('exists', self.gf('django.db.models.fields.BooleanField')(default=True)), -+ ('uid', self.gf('django.db.models.fields.IntegerField')(null=True)), -+ ('current_uid', self.gf('django.db.models.fields.IntegerField')(null=True)), -+ ('group', self.gf('django.db.models.fields.CharField')(max_length=64)), -+ ('current_group', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)), -+ ('gecos', self.gf('django.db.models.fields.CharField')(max_length=1024)), -+ ('current_gecos', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), -+ ('home', self.gf('django.db.models.fields.CharField')(max_length=1024)), -+ ('current_home', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), -+ ('shell', self.gf('django.db.models.fields.CharField')(default='/bin/bash', max_length=1024)), -+ ('current_shell', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)), -+ )) -+ db.send_create_signal('Reporting', ['POSIXUserEntry']) -+ -+ # Adding M2M table for field posixusers on 'Interaction' -+ db.create_table('Reporting_interaction_posixusers', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('posixuserentry', models.ForeignKey(orm['Reporting.posixuserentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_posixusers', ['interaction_id', 'posixuserentry_id']) -+ -+ # Adding M2M table for field posixgroups on 'Interaction' -+ db.create_table('Reporting_interaction_posixgroups', ( -+ ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), -+ ('interaction', models.ForeignKey(orm['Reporting.interaction'], null=False)), -+ ('posixgroupentry', models.ForeignKey(orm['Reporting.posixgroupentry'], null=False)) -+ )) -+ db.create_unique('Reporting_interaction_posixgroups', ['interaction_id', 'posixgroupentry_id']) -+ -+ -+ def backwards(self, orm): -+ # Deleting model 'POSIXGroupEntry' -+ db.delete_table('Reporting_posixgroupentry') -+ -+ # Deleting model 'POSIXUserEntry' -+ db.delete_table('Reporting_posixuserentry') -+ -+ # Removing M2M table for field posixusers on 'Interaction' -+ db.delete_table('Reporting_interaction_posixusers') -+ -+ # Removing M2M table for field posixgroups on 'Interaction' -+ db.delete_table('Reporting_interaction_posixgroups') -+ -+ -+ models = { -+ 'Reporting.actionentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ActionEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'output': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'status': ('django.db.models.fields.CharField', [], {'default': "'check'", 'max_length': '128'}) -+ }, -+ 'Reporting.bundle': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Bundle'}, -+ 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) -+ }, -+ 'Reporting.client': { -+ 'Meta': {'object_name': 'Client'}, -+ 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), -+ 'current_interaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_client'", 'null': 'True', 'to': "orm['Reporting.Interaction']"}), -+ 'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.deviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'DeviceEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'current_minor': ('django.db.models.fields.IntegerField', [], {}), -+ 'device_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_major': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_minor': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.failureentry': { -+ 'Meta': {'object_name': 'FailureEntry'}, -+ 'entry_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'message': ('django.db.models.fields.TextField', [], {}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileacl': { -+ 'Meta': {'object_name': 'FileAcl'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) -+ }, -+ 'Reporting.fileperms': { -+ 'Meta': {'unique_together': "(('owner', 'group', 'mode'),)", 'object_name': 'FilePerms'}, -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'mode': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}) -+ }, -+ 'Reporting.group': { -+ 'Meta': {'ordering': "('name',)", 'object_name': 'Group'}, -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'category': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), -+ 'profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) -+ }, -+ 'Reporting.interaction': { -+ 'Meta': {'ordering': "['-timestamp']", 
'unique_together': "(('client', 'timestamp'),)", 'object_name': 'Interaction'}, -+ 'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ActionEntry']", 'symmetrical': 'False'}), -+ 'bad_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'bundles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Bundle']", 'symmetrical': 'False'}), -+ 'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'to': "orm['Reporting.Client']"}), -+ 'extra_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'failures': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FailureEntry']", 'symmetrical': 'False'}), -+ 'good_count': ('django.db.models.fields.IntegerField', [], {}), -+ 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.Group']", 'symmetrical': 'False'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'modified_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PackageEntry']", 'symmetrical': 'False'}), -+ 'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.PathEntry']", 'symmetrical': 'False'}), -+ 'posixgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXGroupEntry']", 'symmetrical': 'False'}), -+ 'posixusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.POSIXUserEntry']", 'symmetrical': 'False'}), -+ 'profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['Reporting.Group']"}), -+ 'repo_rev_code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'sebooleans': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEBooleanEntry']", 'symmetrical': 'False'}), -+ 'sefcontexts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEFcontextEntry']", 'symmetrical': 'False'}), -+ 'seinterfaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEInterfaceEntry']", 'symmetrical': 'False'}), -+ 'selogins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SELoginEntry']", 'symmetrical': 'False'}), -+ 'semodules': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEModuleEntry']", 'symmetrical': 'False'}), -+ 'senodes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SENodeEntry']", 'symmetrical': 'False'}), -+ 'sepermissives': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPermissiveEntry']", 'symmetrical': 'False'}), -+ 'seports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEPortEntry']", 'symmetrical': 'False'}), -+ 'server': ('django.db.models.fields.CharField', [], {'max_length': '256'}), -+ 'services': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.ServiceEntry']", 'symmetrical': 'False'}), -+ 'seusers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.SEUserEntry']", 'symmetrical': 'False'}), -+ 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), -+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), -+ 
'total_count': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.linkentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'LinkEntry', '_ormbases': ['Reporting.PathEntry']}, -+ 'current_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), -+ 'pathentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['Reporting.PathEntry']", 'unique': 'True', 'primary_key': 'True'}), -+ 'target_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) -+ }, -+ 'Reporting.packageentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PackageEntry'}, -+ 'current_version': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), -+ 'verification_details': ('django.db.models.fields.TextField', [], {'default': "''"}) -+ }, -+ 'Reporting.pathentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'PathEntry'}, -+ 'acls': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Reporting.FileAcl']", 'symmetrical': 'False'}), -+ 'current_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}), -+ 'detail_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), -+ 'details': ('django.db.models.fields.TextField', [], {'default': "''"}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'path_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_perms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['Reporting.FilePerms']"}) -+ }, -+ 'Reporting.performance': { -+ 'Meta': {'object_name': 'Performance'}, -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'interaction': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'performance_items'", 'to': "orm['Reporting.Interaction']"}), -+ 'metric': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '32', 'decimal_places': '16'}) -+ }, -+ 'Reporting.posixgroupentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXGroupEntry'}, -+ 'current_gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'gid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': 
('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.posixuserentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'POSIXUserEntry'}, -+ 'current_gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), -+ 'current_group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), -+ 'current_home': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), -+ 'current_shell': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), -+ 'current_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'gecos': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'group': ('django.db.models.fields.CharField', [], {'max_length': '64'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'home': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'shell': ('django.db.models.fields.CharField', [], {'default': "'/bin/bash'", 'max_length': '1024'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) -+ }, -+ 'Reporting.sebooleanentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEBooleanEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'value': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) -+ }, -+ 'Reporting.sefcontextentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEFcontextEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'filetype': ('django.db.models.fields.CharField', [], {'max_length': '16'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.seinterfaceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEInterfaceEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxtype': 
('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.seloginentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SELoginEntry'}, -+ 'current_selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxuser': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.semoduleentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEModuleEntry'}, -+ 'current_disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.senodeentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SENodeEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'proto': ('django.db.models.fields.CharField', [], {'max_length': '4'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.sepermissiveentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPermissiveEntry'}, -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 'Reporting.seportentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEPortEntry'}, -+ 'current_selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'selinuxtype': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ }, -+ 
'Reporting.serviceentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'ServiceEntry'}, -+ 'current_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}), -+ 'target_status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}) -+ }, -+ 'Reporting.seuserentry': { -+ 'Meta': {'ordering': "('state', 'name')", 'object_name': 'SEUserEntry'}, -+ 'current_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'current_roles': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), -+ 'exists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), -+ 'hash_key': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), -+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), -+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), -+ 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'roles': ('django.db.models.fields.CharField', [], {'max_length': '128'}), -+ 'state': ('django.db.models.fields.IntegerField', [], {}) -+ } -+ } -+ -+ complete_apps = ['Reporting'] -\ No newline at end of file -diff --git a/src/lib/Bcfg2/Reporting/south_migrations/__init__.py b/src/lib/Bcfg2/Reporting/south_migrations/__init__.py -new file mode 100644 -index 0000000..e69de29 -diff --git a/src/lib/Bcfg2/Server/Admin/Reports.py b/src/lib/Bcfg2/Server/Admin/Reports.py -index eb97123..6399d24 100644 ---- a/src/lib/Bcfg2/Server/Admin/Reports.py -+++ b/src/lib/Bcfg2/Server/Admin/Reports.py -@@ -8,6 +8,7 @@ import traceback - from Bcfg2 import settings - - # Load django and reports stuff _after_ we know we can load settings -+import django - from django.core import management - from Bcfg2.Reporting.utils import * - -@@ -71,11 +72,14 @@ class Reports(Bcfg2.Server.Admin.Mode): - - def __init__(self, setup): - Bcfg2.Server.Admin.Mode.__init__(self, setup) -- try: -- import south -- except ImportError: -- print("Django south is required for Reporting") -- raise SystemExit(-3) -+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7: -+ django.setup() -+ elif django.VERIONS[0] == 1 and django.VERSION[1] <= 6: -+ try: -+ import south -+ except ImportError: -+ print("Django south is required for Reporting") -+ raise SystemExit(-3) - - def __call__(self, args): - if len(args) == 0 or args[0] == '-h': -diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py -index 44ba0fe..9d5a3a5 100644 ---- a/src/lib/Bcfg2/Server/Core.py -+++ b/src/lib/Bcfg2/Server/Core.py -@@ -248,10 +248,14 @@ class BaseCore(object): - - from django.core.exceptions import ImproperlyConfigured - from django.core import management -+ import django -+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7: -+ django.setup() - try: - management.call_command("syncdb", interactive=False, - verbosity=0) - self._database_available = True -+ django.db.close_connection() - except ImproperlyConfigured: - self.logger.error("Django configuration problem: %s" % - sys.exc_info()[1]) -diff --git 
a/src/lib/Bcfg2/manage.py b/src/lib/Bcfg2/manage.py -index 3e4eedc..2861916 100755 ---- a/src/lib/Bcfg2/manage.py -+++ b/src/lib/Bcfg2/manage.py -@@ -1,14 +1,21 @@ - #!/usr/bin/env python --from django.core.management import execute_manager --import imp --try: -- imp.find_module('settings') # Assumed to be in the same directory. --except ImportError: -- import sys -- sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__) -- sys.exit(1) -+import os -+import sys -+import django - --import settings -+if django.VERSION[0] == 1 and django.VERSION[1] <= 6: -+ try: -+ imp.find_module('settings') # Assumed to be in the same directory. -+ except ImportError: -+ import sys -+ sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__) -+ sys.exit(1) - - if __name__ == "__main__": -- execute_manager(settings) -+ if django.VERSION[0] == 1 and django.VERSION[1] >= 7: -+ os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Bcfg2.settings') -+ -+ from django.core.management import execute_from_command_line -+ execute_from_command_line(sys.argv) -+ else: -+ execute_manager(settings) -diff --git a/src/lib/Bcfg2/settings.py b/src/lib/Bcfg2/settings.py -index 2c5466a..fd7103e 100644 ---- a/src/lib/Bcfg2/settings.py -+++ b/src/lib/Bcfg2/settings.py -@@ -144,11 +144,18 @@ INSTALLED_APPS = ( - 'django.contrib.admin', - 'Bcfg2.Server', - ) --if HAS_SOUTH: -+if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] >= 7: -+ INSTALLED_APPS = INSTALLED_APPS + ( -+ 'Bcfg2.Reporting', -+ ) -+elif HAS_SOUTH: - INSTALLED_APPS = INSTALLED_APPS + ( - 'south', - 'Bcfg2.Reporting', - ) -+ SOUTH_MIGRATION_MODULES = { -+ 'Bcfg2.Reporting': 'Bcfg2.Reporting.south_migrations', -+ } - if 'BCFG2_LEGACY_MODELS' in os.environ: - INSTALLED_APPS += ('Bcfg2.Server.Reports.reports',) - -@@ -224,3 +231,5 @@ else: - 'django.core.context_processors.media', - 'django.core.context_processors.request' - ) -+ -+TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' -diff --git a/src/sbin/bcfg2-reports b/src/sbin/bcfg2-reports -index 2a8447a..7fc2da1 100755 ---- a/src/sbin/bcfg2-reports -+++ b/src/sbin/bcfg2-reports -@@ -23,6 +23,9 @@ sys.path.pop() - # Set DJANGO_SETTINGS_MODULE appropriately. - os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name - -+import django -+if django.VERSION[0] == 1 and django.VERSION[1] >= 7: -+ django.setup() - from Bcfg2.Reporting.models import (Client, BaseEntry) - from django import db - diff -Nru bcfg2-1.3.5/debian/patches/1000-nagiosgen-ipv6.patch bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/1000-nagiosgen-ipv6.patch --- bcfg2-1.3.5/debian/patches/1000-nagiosgen-ipv6.patch 2014-05-02 20:52:57.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/1000-nagiosgen-ipv6.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -From f7058b792570f9a2aaa83a55cdf51df8872de02e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?St=C3=A9phane=20Graber?= -Date: Thu, 11 Jul 2013 22:52:13 -0400 -Subject: [PATCH] Replace use of gethostbyname by getaddrinfo - -This replaces the remaining gethostbyname() call by the equivalent -getaddrinfo() call required to properly cope with hosts being only -reachable over IPv6. 
---- - src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -Index: bcfg2-1.3.3/src/lib/Bcfg2/Server/Plugins/NagiosGen.py -=================================================================== ---- bcfg2-1.3.3.orig/src/lib/Bcfg2/Server/Plugins/NagiosGen.py 2014-02-15 22:12:22.700395469 -0500 -+++ bcfg2-1.3.3/src/lib/Bcfg2/Server/Plugins/NagiosGen.py 2014-02-15 22:13:58.600391471 -0500 -@@ -42,8 +42,8 @@ - def createhostconfig(self, entry, metadata): - """Build host specific configuration file.""" - try: -- host_address = socket.gethostbyname(metadata.hostname) -- except socket.gaierror: -+ host_address = socket.getaddrinfo(metadata.hostname, None)[0][4][0] -+ except socket.error: - self.logger.error("Failed to find IP address for %s" % - metadata.hostname) - raise Bcfg2.Server.Plugin.PluginExecutionError diff -Nru bcfg2-1.3.5/debian/patches/1001-nagiosgen-sort-groups.patch bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/1001-nagiosgen-sort-groups.patch --- bcfg2-1.3.5/debian/patches/1001-nagiosgen-sort-groups.patch 2014-05-02 20:52:57.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/1001-nagiosgen-sort-groups.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -From d5c70ba8b220ac7ec9a7d462ce1a896a660d8d26 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?St=C3=A9phane=20Graber?= -Date: Tue, 12 Nov 2013 20:57:10 -0500 -Subject: [PATCH] NagiosGen: Sort the hostgroup list - -This prevents the hostgroup list from flipping at random. ---- - src/lib/Bcfg2/Server/Plugins/NagiosGen.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: bcfg2-1.3.3/src/lib/Bcfg2/Server/Plugins/NagiosGen.py -=================================================================== ---- bcfg2-1.3.3.orig/src/lib/Bcfg2/Server/Plugins/NagiosGen.py 2014-02-15 22:14:37.896389833 -0500 -+++ bcfg2-1.3.3/src/lib/Bcfg2/Server/Plugins/NagiosGen.py 2014-02-15 22:14:37.892389833 -0500 -@@ -56,7 +56,7 @@ - - if host_groups: - host_config.append(self.line_fmt % ("hostgroups", -- ",".join(host_groups))) -+ ",".join(sorted(host_groups)))) - - # read the config - xtra = dict() diff -Nru bcfg2-1.3.5/debian/patches/series bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/series --- bcfg2-1.3.5/debian/patches/series 2015-10-24 22:13:31.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/debian/patches/series 2017-08-10 23:03:16.000000000 +0000 @@ -1,5 +1,2 @@ 0001-Update-Apache-configuration-for-version-2.4.patch 0002-Modify-systemd-service-files-to-suit-Debian.patch -0003-make-Bcfg2-compatible-with-django-1.7.patch -1000-nagiosgen-ipv6.patch -1001-nagiosgen-sort-groups.patch diff -Nru bcfg2-1.3.5/doc/appendix/files/mysql.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/files/mysql.txt --- bcfg2-1.3.5/doc/appendix/files/mysql.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/files/mysql.txt 2017-01-10 19:18:17.000000000 +0000 @@ -8,14 +8,14 @@ MySQL example ============= -I had some time ago to continue with putting my configuration into +I had some time ago to continue with putting my configuration into Bcfg2 and maybe this helps someone else. I added a new bundle: .. 
code-block:: xml - + @@ -32,9 +32,9 @@ mysql --defaults-extra-file=/etc/mysql/debian.cnf mysql \ < /root/bcfg2-install/mysql/users.sql -On debian there is a user account in ``/etc/mysql/debian.cnf`` -automatically created, but you could also (manually) create a -user in the database that has enough permissions and add the +On debian there is a user account in ``/etc/mysql/debian.cnf`` +automatically created, but you could also (manually) create a +user in the database that has enough permissions and add the login information in a file yourself. This file looks like this:: [client] diff -Nru bcfg2-1.3.5/doc/appendix/files/ntp.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/files/ntp.txt --- bcfg2-1.3.5/doc/appendix/files/ntp.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/files/ntp.txt 2017-01-10 19:18:17.000000000 +0000 @@ -13,7 +13,7 @@ * After each change, run ``bcfg-repo-validate -v`` * Run the server with ``bcfg2-server -v`` * Update the client with ``bcfg2 -v -d -n`` (will not actually make - client changes) + client changes) Package only ------------ @@ -43,7 +43,7 @@ .. code-block:: xml - + @@ -75,7 +75,7 @@ .. code-block:: xml - + @@ -85,16 +85,14 @@ Setup an ``etc/`` directory structure, and add it to the base:: - # cat Cfg/etc/ntp.conf/ntp.conf + # cat Cfg/etc/ntp.conf/ntp.conf server ntp1.utexas.edu -``Base/base.xml``: - ``Bundler/ntp.xml``: .. code-block:: xml - + @@ -114,18 +112,18 @@ packages are upgraded, so that they can be repaired if the package install clobbered them. * Services associated with a bundle get restarted whenever any entity - in that bundle is modified. This ensures that new configuration - files and software are used after installation. + in that bundle is modified. This ensures that new configuration + files and software are used after installation. The config file, package, and service are really all related -components describing the idea of an ntp client, so they should be +components describing the idea of an ntp client, so they should be logically grouped together. We use a bundle to accomplish this. ``Bundler/ntp.xml``: .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/appendix/guides/authentication.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/authentication.txt --- bcfg2-1.3.5/doc/appendix/guides/authentication.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/authentication.txt 2017-01-10 19:18:17.000000000 +0000 @@ -37,7 +37,6 @@ per-client bcfg2.conf from the per-client metadata:: [communication] - protocol = xmlrpc/ssl #if $self.metadata.uuid != None user = $self.metadata.uuid #end if @@ -146,7 +145,7 @@ +-------------------+------------------------------------------+ ``cert+password`` is the default. This can be changed by setting the -``authentication`` parameter in the ``[communcation]`` section of +``authentication`` parameter in the ``[communication]`` section of ``bcfg2.conf``. 
For instance, to set ``bootstrap`` mode as the global default, you would add the following to ``bcfg2.conf``:: diff -Nru bcfg2-1.3.5/doc/appendix/guides/centos.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/centos.txt --- bcfg2-1.3.5/doc/appendix/guides/centos.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/centos.txt 2017-01-10 19:18:17.000000000 +0000 @@ -102,7 +102,7 @@ Excluding Packages in global exclude list Finished Loaded tool drivers: - Action Chkconfig POSIX YUMng + Action Chkconfig POSIX YUM Phase: initial Correct entries: 0 @@ -132,7 +132,6 @@ [communication] - protocol = xmlrpc/ssl password = N41lMNeW ca = /etc/bcfg2.crt @@ -147,7 +146,7 @@ Excluding Packages in global exclude list Finished Loaded tool drivers: - Action Chkconfig POSIX YUMng + Action Chkconfig POSIX YUM Phase: initial Correct entries: 0 @@ -176,7 +175,7 @@ line of ``bcfg2.conf``. Then create Packages layout (as per :ref:`packages-exampleusage`) in ``/var/lib/bcfg2`` -.. note:: I am using the RawURL syntax here since we are using `mrepo`_ +.. note:: I am using the rawurl syntax here since we are using `mrepo`_ to manage our yum mirrors. .. _mrepo: http://dag.wieers.com/home-made/mrepo/ @@ -184,37 +183,36 @@ .. code-block:: xml - - - x86_64 + + + + x86_64 - - x86_64 + + x86_64 - - x86_64 + + x86_64 + -Due to the :ref:`server-plugins-generators-packages-magic-groups`, -we need to modify our Metadata. Let's add a **centos5.4** group which -inherits a **centos** group (this should replace the existing **redhat** -group) present in ``/var/lib/bcfg2/Metadata/groups.xml``. The resulting -file should look something like this - -.. note:: - - The reason we are creating a release-specific group in this case is - that the YUMSource above is specific to the 5.4 release of centos. - That is, it should not apply to other releases (5.1, 5.3, etc). +To make these sources apply to our centos 5 clients, we need to modify +our Metadata. Let's add a **centos5** group which inherits a +**centos** group (this should replace the existing **redhat** group) +present in ``/var/lib/bcfg2/Metadata/groups.xml``. The resulting file +should look something like this .. code-block:: xml - + - + @@ -238,7 +236,7 @@ the Probe.:: [root@centos ~]# grep plugins /etc/bcfg2.conf - plugins = Base,Bundler,Cfg,...,Probes + plugins = Bundler,Cfg,...,Probes [root@centos ~]# mkdir /var/lib/bcfg2/Probes [root@centos ~]# cat /var/lib/bcfg2/Probes/groups #!/bin/sh @@ -260,9 +258,8 @@ .. code-block:: xml - [root@centos ~]# cat /var/lib/bcfg2/Bundler/base-packages.xml - - + + You need to reference the bundle from your Metadata. The resulting @@ -272,7 +269,7 @@ - + Now if we run the client, we can see what this has done for us.:: @@ -286,7 +283,7 @@ Excluding Packages in global exclude list Finished Loaded tool drivers: - Action Chkconfig POSIX YUMng + Action Chkconfig POSIX YUM Package pam failed verification. Phase: initial @@ -331,7 +328,7 @@ Excluding Packages in global exclude list Finished Loaded tool drivers: - Action Chkconfig POSIX YUMng + Action Chkconfig POSIX YUM Extra Package openssh-clients 4.3p2-36.el5_4.4.x86_64. Extra Package libuser 0.54.7-2.1el5_4.1.x86_64. ... @@ -359,22 +356,22 @@ .. 
code-block:: xml - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + Now when I run the client, you can see I have only one unmanaged @@ -389,9 +386,7 @@ Excluding Packages in global exclude list Finished Loaded tool drivers: - Action Chkconfig POSIX YUMng - Extra Package gpg-pubkey e8562897-459f07a4.None. - Extra Package gpg-pubkey 217521f6-45e8a532.None. + Action Chkconfig POSIX YUM Phase: initial Correct entries: 187 @@ -405,96 +400,11 @@ Incorrect entries: 0 Total managed entries: 187 Unmanaged entries: 16 - Package:gpg-pubkey Service:atd Service:avahi-daemon Service:bcfg2-server ... -The gpg-pubkey packages are special in that they are not really -packages. Currently, the way to manage them is using :ref:`BoundEntries -`. So, after adding them, our Bundle now looks like this - -.. note:: This does not actually control the contents of the files, - you will need to do this part separately (see below). - -.. code-block:: xml - - - - - - - - - - - - - - - - - - - - - - - -.. note:: - - version="foo" is just a dummy attribute for the gpg-pubkey Package - -To actually push the gpg keys out via Bcfg2, you will need to manage the -files as well. This can be done by adding Path entries for each of the -gpg keys you want to manage - -.. code-block:: xml - - - - - - - - - - - - - - - - - - - - - - - - - -Then add the files to Cfg:: - - mkdir -p Cfg/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5 - cp /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5 !$/RPM-GPG-KEY-CentOS-5 - mkdir -p Cfg/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL - cp /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL !$/RPM-GPG-KEY-EPEL - -You will also want to add an *important* attribute to these files so -that they are installed on the client prior to any attempts to install -the **gpg-pubkey** rpm packages. This is especially important during the -bootstrapping phase and can be accomplished using an :ref:`server-info` -file that looks like the following: - -.. code-block:: xml - - - - - Now, running the client shows only unmanaged Service entries. Woohoo! Manage services @@ -528,22 +438,22 @@ [root@centos ~]# cat /var/lib/bcfg2/Rules/services.xml - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + Now we run the client and see there are no more unmanaged entries!:: @@ -557,7 +467,7 @@ Excluding Packages in global exclude list Finished Loaded tool drivers: - Action Chkconfig POSIX YUMng + Action Chkconfig POSIX YUM Phase: initial Correct entries: 205 diff -Nru bcfg2-1.3.5/doc/appendix/guides/converging_rhel5.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/converging_rhel5.txt --- bcfg2-1.3.5/doc/appendix/guides/converging_rhel5.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/converging_rhel5.txt 2017-01-10 19:18:17.000000000 +0000 @@ -24,7 +24,8 @@ sudo yum remove PACKAGE - #. Otherwise, add ```` to the Base or Bundler configuration. + #. Otherwise, add ```` to the Bundler + configuration. * Package (dependency) @@ -38,7 +39,7 @@ * Service - #. Add ```` to the Base or Bundler configuration. + #. Add ```` to the Bundler configuration. #. Add ```` to ``/var/lib/bcfg2/Rules/services.xml``. @@ -57,8 +58,8 @@ * For example, ``/etc/motd`` to ``/var/lib/bcfg2/Cfg/etc/motd/motd``. Yes, there is an extra directory level named after the file. - #. Specify configuration files as ```` in the Base - or Bundler configuration. + #. Specify configuration files as ```` in the + Bundler configuration. #. Add directories to ``/var/lib/bcfg2/Rules/directories.xml``. 
For example: @@ -73,13 +74,13 @@ * Option A: Explicitly list the instances - #. Drop the ```` from the Base or Bundler configuration. + #. Drop the ```` from the Bundler configuration. #. Add an explicit ```` and ```` configuration to a new Bundle, like the following: .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/appendix/guides/fedora.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/fedora.txt --- bcfg2-1.3.5/doc/appendix/guides/fedora.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/fedora.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,493 +0,0 @@ -.. -*- mode: rst -*- - -.. This guide is based on the Centos guide. - -.. _guide-fedora: - -====== -Fedora -====== - -This guide is work in progess. - - -This is a complete getting started guide for Fedora. With this -document you should be able to install a Bcfg2 server, a Bcfg2 client, -and change the ``/etc/motd`` file on the client. - -Prerequisites -============= - -To setup a configuration management system based on Bcfg2 only a few -prerequisites need to be fullfilled. - -* A server machine that can host the Bcfg2 -* Internet access for the installation process -* A working network with DNS - - -Install Bcfg2 From RPM -====================== - -The fastest way to get Bcfg2 onto your system is to use ``yum`` -or PackageKit. ``yum`` will pull all dependencies of Bcfg2 -automatically in. :: - - $ su -c 'yum install bcfg2-server bcfg2' - -Your system should now have the necessary software to use Bcfg2. -The next step is to set up your Bcfg2 :term:`repository`. - - -Initialize your repository -========================== - -Now that you're done with the install, you need to initialize your -repository and setup your ``/etc/bcfg2.conf``. ``bcfg2-admin init`` -is a tool which allows you to automate this: - -.. code-block:: sh - - # bcfg2-admin init - Store bcfg2 configuration in [/etc/bcfg2.conf]: - Location of bcfg2 repository [/var/lib/bcfg2]: - Directory /var/lib/bcfg2 exists. Overwrite? [y/N]:y - Input password used for communication verification (without echoing; leave blank for a random): - What is the server's hostname: [config01.local.net] - Input the server location [https://config01.local.net:6789]: - Input base Operating System for clients: - 1: Red Hat/Fedora/RHEL/RHAS/Centos - 2: SUSE/SLES - 3: Mandrake - 4: Debian - 5: Ubuntu - 6: Gentoo - 7: FreeBSD - : 1 - Generating a 1024 bit RSA private key - .......................................................++++++ - .....++++++ - writing new private key to '/etc/bcfg2.key' - ----- - Signature ok - subject=/C=US/ST=Illinois/L=Argonne/CN=config01.local.net - Getting Private key - Repository created successfuly in /var/lib/bcfg2 - -Change responses as necessary. - -Start the server -================ - -You are now ready to start your Bcfg2 server for the first time:: - - $ su -c '/etc/init.d/bcfg2-server start' - Starting Configuration Management Server: bcfg2-server [ OK ] - -To verify that everything started ok, look for the running daemon and -check the logs: - -.. 
code-block:: sh - - $ su -c 'tail /var/log/messages' - May 16 14:14:57 config01 bcfg2-server[2746]: service available at https://config01.local.net:6789 - May 16 14:14:57 config01 bcfg2-server[2746]: serving bcfg2-server at https://config01.local.net:6789 - May 16 14:14:57 config01 bcfg2-server[2746]: serve_forever() [start] - May 16 14:14:57 config01 bcfg2-server[2746]: Handled 16 events in 0.009s - - -Run ``bcfg2`` to be sure you are able to communicate with the server: - -.. code-block:: sh - - $ su -c 'bcfg2 -vqne' - - /usr/lib/python2.6/site-packages/Bcfg2/Client/Tools/rpmtools.py:23: DeprecationWarning: the md5 module is deprecated; use hashlib instead - import md5 - Loaded plugins: presto, refresh-packagekit - Loaded tool drivers: - Action Chkconfig POSIX YUMng - Extra Package imsettings-libs 0.108.0-2.fc13.i686. - Extra Package PackageKit-device-rebind 0.6.4-1.fc13.i686. - ... - Extra Package newt-python 0.52.11-2.fc13.i686. - Extra Package pulseaudio-gdm-hooks 0.9.21-6.fc13.i686. - - Phase: initial - Correct entries: 0 - Incorrect entries: 0 - Total managed entries: 0 - Unmanaged entries: 1314 - - - Phase: final - Correct entries: 0 - Incorrect entries: 0 - Total managed entries: 0 - Unmanaged entries: 1314 - Package:ConsoleKit Package:jasper-libs Package:pcsc-lite-libs - Package:ConsoleKit-libs Package:java-1.5.0-gcj Package:perf - ... - Package:iw Package:pcre Service:sshd - Package:jack-audio-connection-kit Package:pcsc-lite Service:udev-post - -The ``bcfg2.conf`` file contains only standard plugins so far. - -.. code-block:: sh - - $ su -c 'cat /etc/bcfg2.conf' - - [server] - repository = /var/lib/bcfg2 - plugins = SSHbase,Cfg,Pkgmgr,Rules,Metadata,Base,Bundler - - [statistics] - sendmailpath = /usr/lib/sendmail - - [database] - engine = sqlite3 - # 'postgresql', 'mysql', 'mysql_old', 'sqlite3' or 'ado_mssql'. - name = - # Or path to database file if using sqlite3. - #/etc/brpt.sqlite is default path if left empty - user = - # Not used with sqlite3. - password = - # Not used with sqlite3. - host = - # Not used with sqlite3. - port = - - [communication] - protocol = xmlrpc/ssl - password = test1234 - certificate = /etc/bcfg2.crt - key = /etc/bcfg2.key - ca = /etc/bcfg2.crt - - [components] - bcfg2 = https://config01.local.net:6789 - - -Add the machines to Bcfg2 -------------------------- - -``bcfg2-admin`` can be used to add a machine to Bcfg2 easily. You -need to know the Fully Qualified Domain Name (FQDN) of ever system -you want to control through Bcfg2. :: - - bcfg2-admin client add - -Bring your first machine under Bcfg2 control --------------------------------------------- - -Now it is time to get the first machine's configuration into the -Bcfg2 repository. The server will be the first machine. It's -already in the ``Metadata/client.xml``. - - -Setup the :ref:`server-plugins-generators-packages` plugin -++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -First, replace **Pkgmgr** with **Packages** in the plugins -line of ``bcfg2.conf``. Then create a `Packages/` directory in -``/var/lib/bcfg2`` :: - - $ su -c 'mkdir /var/lib/bcfg2/Packages' - -Create a ``packages.conf`` in the ``/var/lib/bcfg2/Packages`` directory -with the following contents:: - - [global] - -Create a ``sources.xml`` file for the packages in -``/var/lib/bcfg2/Packages`` with the following content. Choose a mirror -near your location according the `Mirror list`_ . - -.. _Mirror list: http://mirrors.fedoraproject.org/publiclist/ - -.. 
code-block:: xml - - - - - Fedora - i386 - x86_64 - - - - - -Due to the :ref:`server-plugins-generators-packages-magic-groups`, -we need to modify our Metadata. Let's add a **fedora13** group which -inherits a **fedora** group (this should replace the existing **redhat** -group) present in ``/var/lib/bcfg2/Metadata/groups.xml``. The resulting -file should look something like this - -.. note:: - - The reason we are creating a release-specific group in this case is - that the YUMSource above is specific to the 13th release of fedora. - That is, it should not apply to other releases (14, 15, etc). - -.. code-block:: xml - - - - - - - - - - - - - - - - - -.. note:: - When editing your xml files by hand, it is useful to occasionally - run ``bcfg2-lint`` to ensure that your xml validates properly. - -Add a probe -+++++++++++ - -The next step for the client will be to have the proper arch group -membership. For this, we will make use of the -:ref:`server-plugins-probes-dynamic-groups` capabilities of the Probes -plugin. Add **Probes** to your plugins line in ``bcfg2.conf`` and -create the Probe: - -.. code-block:: sh - - $ su -c 'mkdir /var/lib/bcfg2/Probes' - $ su -c 'cat /var/lib/bcfg2/Probes/groups' - #!/bin/sh - - echo "group:`uname -m`" - -Now a restart of ``bcfg2-server`` is needed:: - - $ su -c '/etc/init.d/bcfg2-server restart' - -To test the Probe just run ``bcfg2 -vqn``. - -.. code-block:: xml - - $ su -c 'bcfg2 -vqn' - Running probe group - Probe group has result: - group:i686 - ... - -Start managing packages -+++++++++++++++++++++++ - -Add a base-packages bundle. Let's see what happens when we just populate -it with the *yum* package. Create the ``base-packages.xml`` in your -``Bundler/`` directory with a entry for ``yum``. - -.. code-block:: xml - - $ cat /var/lib/bcfg2/Bundler/base-packages.xml - - - - -You need to reference the bundle from your ``group.xml``. The resulting -profile group might look something like this - -.. code-block:: xml - - - - - - -Now if we run the client, we can see what this has done for us.:: - - output - -As you can see, the Packages plugin has generated the dependencies -required for the yum package automatically. The ultimate goal should -be to move all the packages from the **Unmanaged** entries section -to the **Managed** entries section. So, what exactly *are* those -Unmanaged entries?:: - - output - -Now you can go through these and continue adding the packages you -want to your Bundle. After a while, I ended up with a minimal bundle -that looks like this - -.. code-block:: xml - - - - - -Now when I run the client, you can see I have only one unmanaged -package:: - - outout - -The gpg-pubkey packages are special in that they are not really -packages. Currently, the way to manage them is using -:ref:`BoundEntries `. So, after adding them, our -Bundle now looks like this - -.. note:: This does not actually control the contents of the files, - you will need to do this part separately (see below). - -.. code-block:: xml - - - - - - - - - - - - - - - - - - - - - - - -.. note:: - - version="foo" is just a dummy attribute for the gpg-pubkey Package - -To actually push the gpg keys out via Bcfg2, you will need to manage -the files as well. This can be done by adding Path entries for each -of the gpg keys you want to manage - -.. 
code-block:: xml - - - - - - - - - - - - - - - - - - - - - - - - - -Then add the files to Cfg:: - - mkdir -p Cfg/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5 - cp /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5 !$/RPM-GPG-KEY-CentOS-5 - mkdir -p Cfg/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL - cp /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL !$/RPM-GPG-KEY-EPEL - -Now, running the client shows only unmanaged Service entries. Woohoo! - -Manage services -+++++++++++++++ - -Now let's clear up the unmanaged service entries by adding the -following entries to our bundle... - -.. code-block:: xml - - - - - - - - - - - - - - - - - - -...and bind them in Rules - -.. code-block:: xml - - [root@centos ~]# cat /var/lib/bcfg2/Rules/services.xml - - - - - - - - - - - - - - - - - - - -Now we run the client and see there are no more unmanaged entries! :: - - $ su -c 'bcfg2 -veqn' - - -Adding Plugins -++++++++++++++ - -Git ---- - -.. _Git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html - -Adding the :ref:`server-plugins-version-git` plugins can preserve -versioning information. The first step is to add *Git* to your -plugin line:: - - plugins = Base,Bundler,Cfg,...,Git - -For tracking the configuration files in the ``/var/lib/bcfg2`` -directory a git repository need to be established:: - - git init - -For more detail about the setup of git please refer to a `git tutorial`_. -The first commit can be the empty or the allready populated directory:: - - git add . && git commit -a - -While running ``bcfg2-info`` the following line will show up:: - - Initialized git plugin with git directory = /var/lib/bcfg2/.git diff -Nru bcfg2-1.3.5/doc/appendix/guides/import-existing-ssh-keys.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/import-existing-ssh-keys.txt --- bcfg2-1.3.5/doc/appendix/guides/import-existing-ssh-keys.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/import-existing-ssh-keys.txt 2017-01-10 19:18:17.000000000 +0000 @@ -22,27 +22,36 @@ After verifying that SSHbase is listed on the plugins line in ``/etc/bcfg2.conf``, you need to create a bundle containing the -appropriate entries.:: +appropriate entries. In general, you can use a path glob: - cat > /tmp/ssh.xml << EOF - +.. code-block:: xml + + + + + +If you need more granular control -- e.g., other entries in +``/etc/ssh`` are specified in other bundles -- you can also list the +files explicity: + +.. code-block:: xml + + - + + + - + -:: - - mv /tmp/ssh.xml /var/lib/bcfg2/Bundle - Next, you need to add the ssh bundle to the client's metadata in groups.xml. @@ -91,7 +100,7 @@ stats and insert it as host-specific copies of these files in ``/var/lib/bcfg2/SSHBase``.:: - for key in ssh_host_ecdsa_key ssh_host_rsa_key ssh_host_dsa_key ssh_host_key; do + for key in ssh_host_ed25519_key ssh_host_ecdsa_key ssh_host_rsa_key ssh_host_dsa_key ssh_host_key; do sudo bcfg2-admin pull Path /etc/ssh/$key sudo bcfg2-admin pull Path /etc/ssh/$key.pub done diff -Nru bcfg2-1.3.5/doc/appendix/guides/sslca_howto.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/sslca_howto.txt --- bcfg2-1.3.5/doc/appendix/guides/sslca_howto.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/sslca_howto.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,182 @@ +.. -*- mode: rst -*- + +.. 
_appendix-guides-sslca_howto: + +==================================== + Automated Bcfg2 SSL Authentication +==================================== + +This how-to describes one possible scenario for automating SSL +certificate generation and distribution for bcfg2 client/server +communication using the :ref:`SSL CA feature +` of +:ref:`server-plugins-generators-cfg`. The process involves configuring +a certificate authority (CA), generating the CA cert and key pair, +configuring the Cfg SSL CA feature and a Bundle to use the generated +certs to authenticate the Bcfg2 client and server. + +OpenSSL CA +========== + +If you already have an SSL CA available, you can skip this section; +otherwise you can easily build one on the server using openssl. The +paths should be adjusted to suit your preferences. + +#. Prepare the directories and files:: + + mkdir -p /etc/pki/CA/newcerts + mkdir /etc/pki/CA/crl + echo '01' > /etc/pki/CA/serial + touch /etc/pki/CA/index.txt + touch /etc/pki/CA/crlnumber + +#. Edit the ``openssl.cnf`` config file, and in the **[ CA_default ]** + section adjust the following parameters:: + + dir = /etc/pki # Where everything is kept + certs = /etc/pki/CA/certs # Where the issued certs are kept + database = /etc/pki/CA/index.txt # database index file. + new_certs_dir = /etc/pki/CA/newcerts # default place for new certs. + certificate = /etc/pki/CA/certs/bcfg2ca.crt # The CA certificate + serial = /etc/pki/CA/serial # The current serial number + crl_dir = /etc/pki/CA/crl # Where the issued crl are kept + crlnumber = /etc/pki/CA/crlnumber # the current crl number + crl = /etc/pki/CA/crl.pem # The current CRL + private_key = /etc/pki/CA/private/bcfg2ca.key # The private key + +#. Create the CA root certificate and key pair. You'll be asked to + supply a passphrase, and some organizational info. The most + important bit is **Common Name**, which you should set to the + hostname of your bcfg2 server that your clients will see when doing + a reverse DNS query on its IP address:: + + openssl req -new -x509 -extensions v3_ca -keyout bcfg2ca.key \ + -out bcfg2ca.crt -days 3650 + +#. Move the generated cert and key to the locations specified in + ``openssl.cnf``:: + + mv bcfg2ca.key /etc/pki/CA/private/ + mv bcfg2ca.crt /etc/pki/CA/certs/ + +Your self-signing CA is now ready to use. + +Bcfg2 +===== + +SSL CA Feature +-------------- + +The SSL CA feature of Cfg was not designed specifically to manage +Bcfg2 client/server communication, though it is certainly able to +provide certificate generation and management services for that +purpose. You'll need to configure Cfg as described in +:ref:`server-plugins-generators-cfg-ssl-certificates`, including: + +* Configuring a ``[sslca_default]`` section in ``bcfg2.conf`` that + describes the CA you created above; +* Creating ``Cfg/etc/pki/tls/certs/bcfg2client.crt/sslcert.xml`` and + ``Cfg/etc/pki/tls/private/bcfg2client.key/sslkey.xml`` to describe + the key and cert you want generated. + +In general, the defaults in ``sslcert.xml`` and ``sslkey.xml`` should +be fine, so those files can look like this: + +``Cfg/etc/pki/tls/certs/bcfg2client.crt/sslcert.xml``: + +.. code-block:: xml + + + + + +``Cfg/etc/pki/tls/private/bcfg2client.key/sslkey.xml``: + +.. code-block:: xml + + + +Client Bundle +------------- + +To automate the process of generating and distributing certs to the +clients, we need to define at least the cert and key paths created by Cfg, +as well as the CA certificate path, in a Bundle. For example: + +.. 
code-block:: xml + + + + + +Here's a more complete example bcfg2-client bundle: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + +The ``bcfg2.conf`` client config needs at least 5 parameters set for +SSL auth. + +#. ``key`` : This is the host-specific key that Cfg will create. +#. ``certificate`` : This is the host-specific cert that Cfg will + create. +#. ``ca`` : This is a copy of your CA certificate. Not generated by + Cfg. +#. ``password`` : Set to an arbitrary string when using certificate + auth. This also *shouldn't* be required. See: + http://trac.mcs.anl.gov/projects/bcfg2/ticket/1019 + +Here's what a functional **[communication]** section in a +``bcfg2.conf`` genshi template for clients might look like:: + + [communication] + {% if metadata.uuid != None %}\ + user = ${metadata.uuid} + {% end %}\ + password = DUMMYPASSWORDFORCERTAUTH + {% choose %}\ + {% when 'rpm' in metadata.groups %}\ + certificate = /etc/pki/tls/certs/bcfg2client.crt + key = /etc/pki/tls/private/bcfg2client.key + ca = /etc/pki/tls/certs/bcfg2ca.crt + {% end %}\ + {% when 'deb' in metadata.groups %}\ + certificate = /etc/ssl/certs/bcfg2client.crt + key = /etc/ssl/private/bcfg2client.key + ca = /etc/ssl/certs/bcfg2ca.crt + {% end %}\ + {% end %}\ + +As a client will not be able to authenticate with certificates it does +not yet possess, we need to overcome the chicken-and-egg scenario the +first time we try to connect such a client to the server. We can do so +using password-based auth to bootstrap the client, manually specifying +all the relevant auth parameters like so:: + + bcfg2 -qv -S https://fqdn.of.bcfg2-server:6789 -u fqdn.of.client \ + -x SUPER_SECRET_PASSWORD + +If all goes well, the client should receive a freshly generated key and +cert and you should be able to run ``bcfg2`` again without specifying +the connection parameters. + +If you do run into problems you may want to review +:ref:`appendix-guides-authentication`. 
diff -Nru bcfg2-1.3.5/doc/appendix/guides/ubuntu.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/ubuntu.txt --- bcfg2-1.3.5/doc/appendix/guides/ubuntu.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/ubuntu.txt 2017-01-10 19:18:17.000000000 +0000 @@ -172,7 +172,6 @@ transport = LocalFilesystem [communication] - protocol = xmlrpc/ssl password = secret certificate = /etc/ssl/bcfg2.crt key = /etc/ssl/bcfg2.key @@ -327,7 +326,7 @@ root@saucy:/var/lib/bcfg2# bcfg2 -vqdn Configured logging: DEBUG to console; DEBUG to syslog - {'help': False, 'extra': False, 'ppath': '/var/cache/bcfg2', 'ca': '/etc/ssl/bcfg2.crt', 'rpm_version_fail_action': 'upgrade', 'yum_version_fail_action': 'upgrade', 'retry_delay': '1', 'posix_uid_whitelist': [], 'rpm_erase_flags': ['allmatches'], 'verbose': True, 'certificate': '/etc/ssl/bcfg2.crt', 'paranoid': False, 'rpm_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'cache': None, 'yum24_autodep': True, 'yum_pkg_verify': True, 'probe_timeout': None, 'yum_installed_action': 'install', 'rpm_verify_fail_action': 'reinstall', 'dryrun': True, 'retries': '3', 'apt_install_path': '/usr', 'quick': True, 'password': 'secret', 'yum24_installed_action': 'install', 'kevlar': False, 'max_copies': 1, 'syslog': True, 'decision_list': False, 'configfile': '/etc/bcfg2.conf', 'remove': None, 'server': 'https://saucy:6789', 'encoding': 'UTF-8', 'timeout': 90, 'debug': True, 'yum24_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'yum24_erase_flags': ['allmatches'], 'yum24_pkg_checks': True, 'interactive': False, 'apt_etc_path': '/etc', 'rpm_installed_action': 'install', 'yum24_verify_fail_action': 'reinstall', 'omit_lock_check': False, 'yum24_pkg_verify': True, 'serverCN': None, 'file': None, 'apt_var_path': '/var', 'posix_gid_whitelist': [], 'posix_gid_blacklist': [], 'indep': False, 'decision': 'none', 'servicemode': 'default', 'version': False, 'rpm_pkg_checks': True, 'profile': None, 'yum_pkg_checks': True, 'args': [], 'bundle': [], 'posix_uid_blacklist': [], 'user': 'root', 'key': '/etc/ssl/bcfg2.key', 'command_timeout': None, 'probe_exit': True, 'lockfile': '/var/lock/bcfg2.run', 'yum_verify_fail_action': 'reinstall', 'yum24_version_fail_action': 'upgrade', 'yum_verify_flags': [], 'logging': None, 'rpm_pkg_verify': True, 'bundle_quick': False, 'rpm_verify_flags': [], 'yum24_verify_flags': [], 'skipindep': False, 'skipbundle': [], 'portage_binpkgonly': False, 'drivers': ['APK', 'APT', 'Action', 'Blast', 'Chkconfig', 'DebInit', 'Encap', 'FreeBSDInit', 'FreeBSDPackage', 'IPS', 'MacPorts', 'OpenCSW', 'POSIX', 'POSIXUsers', 'Pacman', 'Portage', 'RPM', 'RPMng', 'RcUpdate', 'SELinux', 'SMF', 'SYSV', 'Systemd', 'Upstart', 'VCS', 'YUM', 'YUM24', 'YUMng', 'launchd']} + {'help': False, 'extra': False, 'ppath': '/var/cache/bcfg2', 'ca': '/etc/ssl/bcfg2.crt', 'rpm_version_fail_action': 'upgrade', 'yum_version_fail_action': 'upgrade', 'retry_delay': '1', 'posix_uid_whitelist': [], 'rpm_erase_flags': ['allmatches'], 'verbose': True, 'certificate': '/etc/ssl/bcfg2.crt', 'paranoid': False, 'rpm_installonly': ['kernel', 
'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'cache': None, 'yum24_autodep': True, 'yum_pkg_verify': True, 'probe_timeout': None, 'yum_installed_action': 'install', 'rpm_verify_fail_action': 'reinstall', 'dryrun': True, 'retries': '3', 'apt_install_path': '/usr', 'quick': True, 'password': 'secret', 'yum24_installed_action': 'install', 'kevlar': False, 'max_copies': 1, 'syslog': True, 'decision_list': False, 'configfile': '/etc/bcfg2.conf', 'remove': None, 'server': 'https://saucy:6789', 'encoding': 'UTF-8', 'timeout': 90, 'debug': True, 'yum24_installonly': ['kernel', 'kernel-bigmem', 'kernel-enterprise', 'kernel-smp', 'kernel-modules', 'kernel-debug', 'kernel-unsupported', 'kernel-devel', 'kernel-source', 'kernel-default', 'kernel-largesmp-devel', 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], 'yum24_erase_flags': ['allmatches'], 'yum24_pkg_checks': True, 'interactive': False, 'apt_etc_path': '/etc', 'rpm_installed_action': 'install', 'yum24_verify_fail_action': 'reinstall', 'omit_lock_check': False, 'yum24_pkg_verify': True, 'serverCN': None, 'file': None, 'apt_var_path': '/var', 'posix_gid_whitelist': [], 'posix_gid_blacklist': [], 'indep': False, 'decision': 'none', 'service_mode': 'default', 'version': False, 'rpm_pkg_checks': True, 'profile': None, 'yum_pkg_checks': True, 'args': [], 'bundle': [], 'posix_uid_blacklist': [], 'user': 'root', 'key': '/etc/ssl/bcfg2.key', 'command_timeout': None, 'probe_exit': True, 'lockfile': '/var/lock/bcfg2.run', 'yum_verify_fail_action': 'reinstall', 'yum24_version_fail_action': 'upgrade', 'yum_verify_flags': [], 'logging': None, 'rpm_pkg_verify': True, 'bundle_quick': False, 'rpm_verify_flags': [], 'yum24_verify_flags': [], 'skipindep': False, 'skipbundle': [], 'portage_binpkgonly': False, 'drivers': ['APK', 'APT', 'Action', 'Blast', 'Chkconfig', 'DebInit', 'Encap', 'FreeBSDInit', 'FreeBSDPackage', 'IPS', 'MacPorts', 'OpenCSW', 'POSIX', 'POSIXUsers', 'Pacman', 'Portage', 'RPM', 'RPMng', 'RcUpdate', 'SELinux', 'SMF', 'SYSV', 'Systemd', 'Upstart', 'VCS', 'YUM', 'YUM24', 'YUMng', 'launchd']} Starting Bcfg2 client run at 1374191628.88 Running probe groups Running: /tmp/tmpEtgdwo @@ -496,7 +495,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/appendix/guides/vcs.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/vcs.txt --- bcfg2-1.3.5/doc/appendix/guides/vcs.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/vcs.txt 2017-01-10 19:18:17.000000000 +0000 @@ -30,7 +30,7 @@ Initialized git plugin with git directory = /var/lib/bcfg2/.git -Mercurial +Mercurial ========= The :ref:`server-plugins-version-hg` plugin also allows you to store @@ -59,7 +59,7 @@ Initialized hg plugin with hg directory = /var/lib/bcfg2/.hg -Darcs +Darcs ===== The :ref:`server-plugins-version-darcs` plugin also allows you to store @@ -70,8 +70,8 @@ darcs initialize -To commit to the darcs repository an author must be added to the -``_darcs/prefs/author`` file. If the ``author`` file is missing, +To commit to the darcs repository an author must be added to the +``_darcs/prefs/author`` file. If the ``author`` file is missing, darcs will ask you to enter your e-mail address. .. code-block:: sh @@ -99,7 +99,7 @@ The :ref:`server-plugins-version-cvs` plugin also allows you to store version information in the statistics database. 
- plugins = Base,Bundler,Cfg,...,Cvs + plugins = Bundler,Cfg,...,Cvs The CVS repository must be initialized:: diff -Nru bcfg2-1.3.5/doc/appendix/guides/web-reports-install.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/web-reports-install.txt --- bcfg2-1.3.5/doc/appendix/guides/web-reports-install.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/appendix/guides/web-reports-install.txt 2017-01-10 19:18:17.000000000 +0000 @@ -28,7 +28,7 @@ [server] repository = /var/lib/bcfg2 - plugins = Base,Bundler,Cfg,...,Reporting + plugins = Bundler,Cfg,...,Reporting [reporting] transport = LocalFilesystem @@ -53,7 +53,7 @@ [server] repository = /var/lib/bcfg2 - plugins = Base,Bundler,Cfg,...,Reporting + plugins = Bundler,Cfg,...,Reporting [database] engine = sqlite3 diff -Nru bcfg2-1.3.5/doc/client/metadata.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/metadata.txt --- bcfg2-1.3.5/doc/client/metadata.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/metadata.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _client-metadata: @@ -24,12 +25,12 @@ This construction process spans several server plugins. The :ref:`server-plugins-grouping-metadata` is responsible for initial instance creation, including the client hostname, -profile, and basic group memberships. After this initial creation, -Connector plugins (such as :ref:`server-plugins-probes-index` or -:ref:`server-plugins-connectors-properties`) can add additional group -memberships for clients. These memberships are merged into the instance; -that is, the new group memberships are treated as if they were included -in groups.xml. If any of these groups are defined in groups.xml, +profile, and basic group memberships. After this initial +creation, Connector plugins (such as :ref:`server-plugins-probes` +or :ref:`server-plugins-connectors-properties`) can add additional +group memberships for clients. These memberships are merged into the +instance; that is, the new group memberships are treated as if they were +included in groups.xml. If any of these groups are defined in groups.xml, then groups included there are included in the ClientMetadata instance group list. At the end of this process, the ClientMetadata instance has its complete set of group memberships. At this point, each connector diff -Nru bcfg2-1.3.5/doc/client/tools/actions.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools/actions.txt --- bcfg2-1.3.5/doc/client/tools/actions.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools/actions.txt 2017-01-10 19:18:17.000000000 +0000 @@ -28,18 +28,17 @@ Note that the status attribute tells the bcfg2 client to ignore return status, causing failures to still not be centrally reported. If central reporting of action failure is desired, set this attribute to -'check'. Also note that Action entries included in Base will not be -executed. +'check'. -Actions may be completely defined inside of a bundle with the use of -:ref:`server-configurationentries`, much like Packages, Services or Paths. -The Rules plugin can also bind these entries. For example to include the -above action in a bundle, first the Action entry must be included in the +Actions may be completely defined inside of a bundle with the use of +:ref:`server-configurationentries`, much like Packages, Services or Paths. +The Rules plugin can also bind these entries. 
For example to include the +above action in a bundle, first the Action entry must be included in the bundle: .. code-block:: xml - + ... @@ -56,6 +55,16 @@ This allows different clients to get different actions as a part of the same bundle based on group membership. +It is also possible to do this in one step in the bundle itself with a +``BoundAction`` tag, e.g.: + +.. code-block:: xml + + + + + Example Action (add APT keys) ============================= diff -Nru bcfg2-1.3.5/doc/client/tools/vcs.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools/vcs.txt --- bcfg2-1.3.5/doc/client/tools/vcs.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools/vcs.txt 2017-01-10 19:18:17.000000000 +0000 @@ -8,8 +8,34 @@ .. warning: This tool is currently under development. -.. note: Currently, the only supported VCS is git. +.. note: Currently, the only supported VCS is git and svn. The VCS tool allows you to checkout particular revisions from a VCS repository on the client to a specified path. The tool requires the appropriate python libraries for the VCS used to be installed. + +See :ref:`server-plugins-generators-rules-vcs` for possible options. + +Example usage: + +You may want to create a `Rules/paths.xml` with the following: + +.. code-block:: xml + + + + + +Once the rule is created a client can reference the path from a +bundle, this path will then be populated from the repository. To +continue the above example, a file `Bundle/bcfg2.xml` might contain +this: + +.. code-block:: xml + + + + diff -Nru bcfg2-1.3.5/doc/client/tools/yum.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools/yum.txt --- bcfg2-1.3.5/doc/client/tools/yum.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools/yum.txt 2017-01-10 19:18:17.000000000 +0000 @@ -7,9 +7,7 @@ ============================ The RPM and YUM client drivers provide client support for RPMs -(installed directly from URLs) and Yum repositories. These drivers -were formerly called ``RPMng`` and ``YUMng``, respectively, but were -renamed for Bcfg2 1.3.0. +(installed directly from URLs) and Yum repositories. Features ======== diff -Nru bcfg2-1.3.5/doc/client/tools.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools.txt --- bcfg2-1.3.5/doc/client/tools.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/client/tools.txt 2017-01-10 19:18:17.000000000 +0000 @@ -133,8 +133,6 @@ Executes RPM to manage packages on Redhat-based and similar systems. Consider using the :ref:`YUM ` tool instead if possible. -Formerly called ``RPMng``, but was renamed for the 1.3 release. - SMF --- @@ -158,14 +156,22 @@ .. note:: - If the Packages specified in the PackageList are datastream format packages distributed via HTTP, you must specify a simplename attribute. Such packages will be downloaded and installed from a local path. + If the Packages specified in the PackageList are datastream format + packages distributed via HTTP, you must specify a simplefile attribute. + Such packages will be downloaded and installed from a local path. + + Note the use of the uri attribute in the datastream format example. If + the simplefile attribute exists, the + :ref:`Pkgmgr ` plugin will + automatically construct the url attribute by concatenating the uri and + simplefile attributes (with an intervening slash). - datastream format over HTTP: + Datastream format over HTTP: .. 
code-block:: xml - - + + File system format over NFS or local path: @@ -187,13 +193,5 @@ YUM --- -Handles RPMs using the YUM package manager. Renamed from ``YUMng`` for -the 1.3 release. See :ref:`client-tools-yum` for more details. - -YUM24 ------ - -.. warning:: Deprecated in favor of :ref:`YUM ` - -Handles RPMs using older versions of the YUM package manager. - +Handles RPMs using the YUM package manager. See +:ref:`client-tools-yum` for more details. diff -Nru bcfg2-1.3.5/doc/conf.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/conf.py --- bcfg2-1.3.5/doc/conf.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/conf.py 2017-01-10 19:18:17.000000000 +0000 @@ -64,9 +64,9 @@ # built documents. # # The short X.Y version. -version = '1.3' +version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.3.5' +release = '1.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -133,7 +133,7 @@ # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = 'favicon.ico' +html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff -Nru bcfg2-1.3.5/doc/development/caching.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/caching.txt --- bcfg2-1.3.5/doc/development/caching.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/caching.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,74 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +.. _development-cache: + +============================ + Server-side Caching System +============================ + +.. versionadded:: 1.4.0 + +Bcfg2 caches two kinds of data: + +* The contents of all files that it reads in, including (often) an + optimized representation. E.g., XML files are cached both in their + raw (text) format, and also as :class:`lxml.etree._Element` objects. +* Arbitrary data, in the server-side caching system documented on this + page. + +The caching system keeps a single unified cache with all cache data in +it. Each individual datum stored in the cache is associated with any +number of "tags" -- simple terms that uniquely identify the datum. +This lets you very easily expire related data from multiple caches at +once; for instance, for expiring all data related to a host: + +.. code-block:: python + + Bcfg2.Server.Cache.expire("foo.example.com") + +This would expire *all* data related to ``foo.example.com``, +regardless of which plugin cached it, and so on. + +This permits a high level of interoperation between different plugins +and the cache, which is necessary due to the wide distribution of data +in Bcfg2 and the many different data sources that can be incorporated. +More technical details about writing code that uses the caches are below. + +Currently known caches are: + +.. 
currentmodule:: Bcfg2.Server.Plugins.Packages.Collection + ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Tags | Key(s) | Values | Use | ++=============+=======================================+=================================================+======================================================+ +| Metadata | Hostname | :class:`ClientMetadata | The :ref:`Metadata cache ` | +| | | ` | | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Probes, | Hostname | ``list`` of group names | Groups set by :ref:`server-plugins-probes` | +| probegroups | | | | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Probes, | Hostname | ``dict`` of ````: | Other data set by :ref:`server-plugins-probes` | +| probedata | | :class:`ProbeData | | +| | | ` | | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Packages, | :attr:`Packages Collection cache key | :class:`Collection` | Kept by :ref:`server-plugins-generators-packages` in | +| collections | ` | | order to expire repository metadata cached on disk | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Packages, | Hostname | :attr:`Packages Collection cache key | Used by the Packages plugin to return Collection | +| clients | | ` | objects for clients. This is cross-referenced with | +| | | | the ``Packages, collections`` cache | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Packages, | :attr:`Packages Collection cache key | ``set`` of package names | Cached results from looking up | +| pkg_groups | `, | | ```` entries | +| | hash of the selected package groups | | | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ +| Packages, | :attr:`Packages Collection cache key | ``set`` of package names | Cached results from resolving complete package sets | +| pkg_sets | `, | | for clients | +| | hash of the initial package selection | | | ++-------------+---------------------------------------+-------------------------------------------------+------------------------------------------------------+ + +These are enumerated so that they can be expired as needed by other +plugins or other code points. + +.. automodule:: Bcfg2.Server.Cache diff -Nru bcfg2-1.3.5/doc/development/cfg.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/cfg.txt --- bcfg2-1.3.5/doc/development/cfg.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/cfg.txt 2017-01-10 19:18:17.000000000 +0000 @@ -55,12 +55,6 @@ .. autoexception:: Bcfg2.Server.Plugin.exceptions.PluginInitError :noindex: -Global Variables -================ - -.. autodata:: Bcfg2.Server.Plugins.Cfg.SETUP -.. autodata:: Bcfg2.Server.Plugins.Cfg.CFG - Existing Cfg Handlers ===================== @@ -70,9 +64,11 @@ .. 
autoclass:: Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator.CfgPlaintextGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator.CfgGenshiGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator.CfgCheetahGenerator +.. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgJinja2Generator.CfgJinja2Generator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenerator.CfgEncryptedGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedGenshiGenerator.CfgEncryptedGenshiGenerator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedCheetahGenerator.CfgEncryptedCheetahGenerator +.. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEncryptedJinja2Generator.CfgEncryptedJinja2Generator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator.CfgAuthorizedKeysGenerator Creators @@ -81,18 +77,11 @@ .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgPrivateKeyCreator.CfgPrivateKeyCreator .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator.CfgPublicKeyCreator -Filters -------- - -.. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgCatFilter.CfgCatFilter -.. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgDiffFilter.CfgDiffFilter - Info Handlers ------------- .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgDefaultInfo .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgInfoXML.CfgInfoXML -.. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgLegacyInfo.CfgLegacyInfo Verifiers --------- @@ -105,6 +94,6 @@ These other objects comprise the remainder of the Cfg plugin, and are included for completeness. -.. autoattribute:: Bcfg2.Server.Plugins.Cfg.DEFAULT_INFO .. autoclass:: Bcfg2.Server.Plugins.Cfg.CfgEntrySet .. autoclass:: Bcfg2.Server.Plugins.Cfg.Cfg +.. automethod:: Bcfg2.Server.Plugins.Cfg.get_cfg diff -Nru bcfg2-1.3.5/doc/development/compat.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/compat.txt --- bcfg2-1.3.5/doc/development/compat.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/compat.txt 2017-01-10 19:18:17.000000000 +0000 @@ -113,7 +113,7 @@ below, since some of these implementations may be feature-incomplete. +----------------+--------------------------------+--------------------------------------------+ -| Name | Python 2.4 | Python 2.4+ | +| Name | Python 2.4 | Python 2.5+ | +================+================================+============================================+ | formatdate | :func:`email.Utils.formatdate` | :func:`email.utils.formatdate` | +----------------+--------------------------------+--------------------------------------------+ @@ -129,6 +129,8 @@ +----------------+--------------------------------+--------------------------------------------+ | MutableMapping | :class:`UserDict.DictMixin` | :class:`collections.MutableMapping` (2.6+) | +----------------+--------------------------------+--------------------------------------------+ +| literal_eval | :func:`eval` | :func:`ast.literal_eval` (2.6+) | ++----------------+--------------------------------+--------------------------------------------+ walk_packages ~~~~~~~~~~~~~ @@ -171,6 +173,14 @@ :class:`collections.MutableMapping` is available in Python 2.6+, and will be used if available. +literal_eval +~~~~~~~~~~~~ + +:func:`ast.literal_eval` is a safe version of :func:`eval` that will only +allow declaration of literal strings, ints, lists, dicts, etc. This was +introduced in Python 2.6, and as such Python 2.4 uses the plain old +:func:`eval`. 
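As a rough sketch of the fallback pattern described above (illustrative only -- the actual shim in Bcfg2's compatibility module may differ in detail), the import looks something like this:

.. code-block:: python

    # Prefer the safe ast.literal_eval; on Python 2.4, where the ast
    # module does not exist, fall back to the builtin eval.
    try:
        from ast import literal_eval
    except ImportError:
        literal_eval = eval

    # Only literal Python expressions are accepted by ast.literal_eval,
    # so untrusted strings cannot run arbitrary code on 2.6 and later:
    literal_eval("{'plugins': ['Bundler', 'Cfg'], 'retries': 3}")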
+ Other Symbols ------------- diff -Nru bcfg2-1.3.5/doc/development/core.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/core.txt --- bcfg2-1.3.5/doc/development/core.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/core.txt 2017-01-10 19:18:17.000000000 +0000 @@ -10,8 +10,10 @@ Bcfg2 1.3 added a pluggable server core system so that the server core itself can be easily swapped out to use different technologies. It -currently ships with two backends: a builtin core written from scratch -using the various server tools in the Python standard library; and an +currently ships with several backends: a builtin core written from +scratch using the various server tools in the Python standard library; +a variant on the builtin core that uses Python 2.6's +:mod:`multiprocessing` library to process requests in parallel; and an experimental `CherryPy `_ based core. This page documents the server core interface so that other cores can be written to take advantage of other technologies, e.g., `Tornado @@ -20,20 +22,25 @@ A core implementation needs to: -* Override :func:`Bcfg2.Server.Core.BaseCore._daemonize` to handle - daemonization, writing the PID file, and dropping privileges. -* Override :func:`Bcfg2.Server.Core.BaseCore._run` to handle server +* Override :func:`Bcfg2.Server.Core.Core._run` to handle server startup. -* Override :func:`Bcfg2.Server.Core.BaseCore._block` to run the +* Override :func:`Bcfg2.Server.Core.Core._block` to run the blocking server loop. -* Call :func:`Bcfg2.Server.Core.BaseCore.shutdown` on orderly +* Call :func:`Bcfg2.Server.Core.Core.shutdown` on orderly shutdown. +A core that wants to use the network (i.e., a core that isn't used +entirely for introspection, as in :ref:`bcfg2-info +`, or other local tasks) should inherit from +:class:`Bcfg2.Server.Core.NetworkCore`, and must also override +:func:`Bcfg2.Server.Core.NetworkCore._daemonize` to handle daemonization, +writing the PID file, and dropping privileges. + Nearly all XML-RPC handling is delegated entirely to the core implementation. It needs to: -* Call :func:`Bcfg2.Server.Core.BaseCore.authenticate` to authenticate - clients. +* Call :func:`Bcfg2.Server.Core.NetworkCore.authenticate` to + authenticate clients. * Handle :exc:`xmlrpclib.Fault` exceptions raised by the exposed XML-RPC methods as appropriate. * Dispatch XML-RPC method invocations to the appropriate method, @@ -59,7 +66,7 @@ The builtin server core consists of the core implementation (:class:`Bcfg2.Server.BuiltinCore.Core`) and the XML-RPC server -implementation (:mod:`Bcfg2.SSLServer`). +implementation (:mod:`Bcfg2.Server.SSLServer`). Core ~~~~ @@ -69,7 +76,7 @@ XML-RPC Server ~~~~~~~~~~~~~~ -.. automodule:: Bcfg2.SSLServer +.. automodule:: Bcfg2.Server.SSLServer Multiprocessing Core -------------------- @@ -79,4 +86,4 @@ CherryPy Core ------------- -.. automodule:: Bcfg2.Server.CherryPyCore +.. automodule:: Bcfg2.Server.CherrypyCore diff -Nru bcfg2-1.3.5/doc/development/fam.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/fam.txt --- bcfg2-1.3.5/doc/development/fam.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/fam.txt 2017-01-10 19:18:17.000000000 +0000 @@ -56,11 +56,6 @@ .. automodule:: Bcfg2.Server.FileMonitor.Pseudo -Fam ---- - -.. 
automodule:: Bcfg2.Server.FileMonitor.Fam - Gamin ----- diff -Nru bcfg2-1.3.5/doc/development/lint.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/lint.txt --- bcfg2-1.3.5/doc/development/lint.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/lint.txt 2017-01-10 19:18:17.000000000 +0000 @@ -10,14 +10,14 @@ lets you easily write your own plugins to verify various parts of your Bcfg2 specification. -Plugins are loaded in one of two ways: +Plugins are included in a module of the same name as the plugin class +in :mod:`Bcfg2.Server.Lint`, e.g., :mod:`Bcfg2.Server.Lint.Validate`. -* They may be included in a module of the same name as the plugin - class in :mod:`Bcfg2.Server.Lint`, e.g., - :mod:`Bcfg2.Server.Lint.Validate`. -* They may be included directly in a Bcfg2 server plugin, called - "Lint", e.g., - :class:`Bcfg2.Server.Plugins.Metadata.MetadataLint`. +.. note:: + + It is no longer possible to include lint plugins directly in a + Bcfg2 server plugin, e.g., + :class:`Bcfg2.Server.Plugins.Metadata.MetadataLint`. Plugin Types ============ @@ -106,15 +106,15 @@ Existing ``bcfg2-lint`` Plugins =============================== -AWSTagsLint ------------ +AWSTags +------- -.. autoclass:: Bcfg2.Server.Plugins.AWSTags.AWSTagsLint +.. automodule:: Bcfg2.Server.Lint.AWSTags -BundlerLint ------------ +Bundler +------- -.. autoclass:: Bcfg2.Server.Plugins.Bundler.BundlerLint +.. automodule:: Bcfg2.Server.Lint.Bundler Comments -------- @@ -131,10 +131,10 @@ .. automodule:: Bcfg2.Server.Lint.GroupNames -GroupPatternsLint ------------------ +GroupPatterns +------------- -.. autoclass:: Bcfg2.Server.Plugins.GroupPatterns.GroupPatternsLint +.. automodule:: Bcfg2.Server.Lint.GroupPatterns InfoXML ------- @@ -146,25 +146,25 @@ .. automodule:: Bcfg2.Server.Lint.MergeFiles -MetadataLint ------------- +Metadata +-------- -.. autoclass:: Bcfg2.Server.Plugins.Metadata.MetadataLint +.. automodule:: Bcfg2.Server.Lint.Metadata -PkgmgrLint ----------- +Pkgmgr +------ -.. autoclass:: Bcfg2.Server.Plugins.Pkgmgr.PkgmgrLint +.. automodule:: Bcfg2.Server.Lint.Pkgmgr RequiredAttrs ------------- .. automodule:: Bcfg2.Server.Lint.RequiredAttrs -TemplateHelperLint ------------------- +TemplateHelper +-------------- -.. autoclass:: Bcfg2.Server.Plugins.TemplateHelper.TemplateHelperLint +.. automodule:: Bcfg2.Server.Lint.TemplateHelper Validate -------- diff -Nru bcfg2-1.3.5/doc/development/option_parsing.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/option_parsing.txt --- bcfg2-1.3.5/doc/development/option_parsing.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/option_parsing.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,248 @@ +.. -*- mode: rst -*- + +.. _development-option-parsing: + +==================== +Bcfg2 Option Parsing +==================== + +Bcfg2 uses an option parsing mechanism based on the Python +:mod:`argparse` module. It does several very useful things that +``argparse`` does not: + +* Collects options from various places, which lets us easily specify + per-plugin options, for example; +* Automatically loads components (such as plugins); +* Synthesizes option values from the command line, config files, and + environment variables; +* Can dynamically create commands with many subcommands (e.g., + bcfg2-info and bcfg2-admin); and +* Supports keeping documentation inline with the option declaration, + which will make it easier to generate man pages. 
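As a small taste of what this looks like in practice (a minimal sketch using a hypothetical ``FrobHelper`` component and ``--frob`` option, not part of Bcfg2 itself), a component only needs to declare an ``options`` list; once parsing has finished, the synthesized values -- whether they came from the command line, the config file, or the environment -- are available on ``Bcfg2.Options.setup``:

.. code-block:: python

    import Bcfg2.Options

    class FrobHelper(object):
        """Hypothetical component used only for illustration."""
        options = [
            Bcfg2.Options.BooleanOption(
                "--frob", help="Enable frobnication")]

        def frob(self):
            # The parsed value lives on the shared namespace, regardless
            # of where it was set.
            if Bcfg2.Options.setup.frob:
                print("frobnicating")

    # Registering the component makes the parser pick up its options.
    Bcfg2.Options.get_parser().add_component(FrobHelper)

The sections below describe the pieces used here in more detail.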
+ + +Collecting Options +================== + +One of the more important features of the option parser is its ability +to automatically collect options from loaded components (e.g., Bcfg2 +server plugins). Given the highly pluggable architecture of Bcfg2, +this helps ensure two things: + +#. We do not have to specify all options in all places, or even in + most places. Options are specified alongside the class(es) that use + them. +#. All options needed for a given script to run are guaranteed to be + loaded, without the need to specify all components that script uses + manually. + +For instance, assume a few plugins: + +* The ``Foo`` plugin takes one option, ``--foo`` +* The ``Bar`` plugin takes two options, ``--bar`` and ``--force`` + +The plugins are used by the ``bcfg2-quux`` command, which itself takes +two options: ``--plugins`` (which selects the plugins) and +``--test``. The options would be selected at runtime, so for instance +these would be valid: + +.. code-block:: bash + + bcfg2-quux --plugins Foo --foo --test + bcfg2-quux --plugins Foo,Bar --foo --bar --force + bcfg2-quux --plugins Bar --force + +But this would not: + + bcfg2-quux --plugins Foo --bar + +The help message would reflect the options that are available to the +default set of plugins. (For this reason, allowing component lists to +be set in the config file is very useful; that way, usage messages +reflect the components in the config file.) + +Components (in this example, the plugins) can be classes or modules. +There is no required interface for an option component. They may +*optionally* have: + +* An ``options`` attribute that is a list of + :class:`Bcfg2.Options.Options.Option` objects or option groups. +* A boolean ``parse_first`` attribute; if set to True, the options for + the component are parsed before all other options. This is useful + for, e.g., Django database settings, which must be parsed before + plugins that use Django can be loaded. +* A function or static method, ``options_parsed_hook``, that is called + when all options have been parsed. (This will be called again if + :func:`Bcfg2.Options.Parser.Parser.reparse` is called.) +* A function or static method, ``component_parsed_hook``, that is + called when early option parsing for a given component has + completed. This is *only* called for components with + ``parse_first`` set to True. It is passed a single argument: a + :class:`argparse.Namespace` object containing the complete set of + early options. + +Options are collected through two primary mechanisms: + +#. The :class:`Bcfg2.Options.Actions.ComponentAction` class. When a + ComponentAction subclass is used as the action of an option, then + options contained in the classes (or modules) given in the option + value will be added to the parser. +#. Modules that are not loaded via a + :class:`Bcfg2.Options.Actions.ComponentAction` option may load + options at runtime. + +Since it is preferred to add components instead of just options, +loading options at runtime is generally best accomplished by creating +a container object whose only purpose is to hold options. For +instance: + +.. code-block:: python + + def foo(): + # do stuff + + class _OptionContainer(object): + options = [ + Bcfg2.Options.BooleanOption("--foo", help="Enable foo")] + + @staticmethod + def options_parsed_hook(): + if Bcfg2.Options.setup.foo: + foo() + + Bcfg2.Options.get_parser().add_component(_OptionContainer) + +The Bcfg2.Options module +======================== + +.. currentmodule:: Bcfg2.Options + +.. 
autodata:: setup + +Options +------- + +The base :class:`Bcfg2.Options.Option` object represents an option. +Unlike options in :mod:`argparse`, an Option object does not need to +be associated with an option parser; it exists on its own. + +.. autoclass:: Option +.. autoclass:: PathOption +.. autoclass:: BooleanOption +.. autoclass:: PositionalArgument + +The Parser +---------- + +.. autoclass:: Parser +.. autofunction:: get_parser +.. autoexception:: OptionParserException + +Option Groups +------------- + +Options can be grouped in various meaningful ways. This uses a +variety of :mod:`argparse` functionality behind the scenes. + +In all cases, options can be added to groups in-line by simply +specifying them in the object group constructor: + +.. code-block:: python + + options = [ + Bcfg2.Options.ExclusiveOptionGroup( + Bcfg2.Options.Option(...), + Bcfg2.Options.Option(...), + required=True), + ....] + +Nesting object groups is supported in theory, but barely tested. + +.. autoclass:: OptionGroup +.. autoclass:: ExclusiveOptionGroup +.. autoclass:: Subparser +.. autoclass:: WildcardSectionGroup + +Subcommands +----------- + +This library makes it easier to work with programs that have a large +number of subcommands (e.g., :ref:`bcfg2-info ` and +:ref:`bcfg2-admin `). + +The normal implementation pattern is this: + +#. Define all of your subcommands as children of + :class:`Bcfg2.Options.Subcommand`. +#. Create a :class:`Bcfg2.Options.CommandRegistry` object that will be + used to register all of the commands. Registering a command + collect its options and adds it as a + :class:`Bcfg2.Options.Subparser` option group to the main option + parser. +#. Register your commands with the + :func:`Bcfg2.Options.CommandRegistry.register_commands` method of + your ``CommandRegistry`` object. +#. Add options from the + :attr:`Bcfg2.Options.CommandRegistry.command_options` + attribute to the option parser. +#. Parse options, and run. + +:mod:`Bcfg2.Server.Admin` provides a fairly simple implementation, +where the CLI class subclasses the command registry: + +.. code-block:: python + + class CLI(Bcfg2.Options.CommandRegistry): + def __init__(self): + Bcfg2.Options.CommandRegistry.__init__(self) + self.register_commands(globals().values(), parent=AdminCmd) + parser = Bcfg2.Options.get_parser( + description="Manage a running Bcfg2 server", + components=[self]) + parser.add_options(self.subcommand_options) + parser.parse() + +In this case, commands are collected from amongst all global variables +(the most likely scenario), and they must be children of +:class:`Bcfg2.Server.Admin.AdminCmd`, which itself subclasses +:class:`Bcfg2.Options.Subcommand`. + +Commands are defined by subclassing :class:`Bcfg2.Options.Subcommand`. +At a minimum, the :func:`Bcfg2.Options.Subcommand.run` method must be +overridden, and a docstring written. + +.. autoclass:: Subcommand +.. autoclass:: CommandRegistry + +Actions +------- + +Several custom argparse `actions +`_ provide +some of the option collection magic of :mod:`Bcfg2.Options`. + +.. autoclass:: ConfigFileAction +.. autoclass:: ComponentAction +.. autoclass:: PluginsAction + +Option Types +------------ + +:mod:`Bcfg2.Options` provides a number of useful types for use as the `type +`_ keyword +argument to +the :class:`Bcfg2.Options.Option` constructor. + +.. autofunction:: Bcfg2.Options.Types.path +.. autofunction:: Bcfg2.Options.Types.comma_list +.. autofunction:: Bcfg2.Options.Types.colon_list +.. autofunction:: Bcfg2.Options.Types.octal +.. 
autofunction:: Bcfg2.Options.Types.username +.. autofunction:: Bcfg2.Options.Types.groupname +.. autofunction:: Bcfg2.Options.Types.timeout +.. autofunction:: Bcfg2.Options.Types.size + +Common Options +-------------- + +.. autoclass:: Common diff -Nru bcfg2-1.3.5/doc/development/plugins.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/plugins.txt --- bcfg2-1.3.5/doc/development/plugins.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/plugins.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _development-plugins: @@ -117,7 +118,9 @@ In Bcfg2 1.3.0, some limited :ref:`server-caching` was introduced. If you are writing a :class:`Bcfg2.Server.Plugin.interfaces.Connector` plugin that implements -:func:`Bcfg2.Server.Plugin.interfaces.Connector.get_additional_groups`, +:func:`Bcfg2.Server.Plugin.interfaces.Connector.get_additional_groups` +or +:func:`Bcfg2.Server.Plugin.interfaces.Connector.get_additional_data`, then you need to be able to invalidate the server metadata cache in order to be compatible with the ``cautious`` or ``aggressive`` caching modes. @@ -128,26 +131,27 @@ of the caching mode. See :ref:`server-caching` for a description of each mode. * :attr:`Bcfg2.Server.Core.metadata_cache`: A dict-like - :class:`Bcfg2.Cache.Cache` object that stores the cached data. + :class:`Bcfg2.Server.Cache.Cache` object that stores the cached + data. :class:`Bcfg2.Server.Plugin.base.Plugin` objects have access to the :class:`Bcfg2.Server.Core` object as ``self.core``. In general, -you'll be interested in the :func:`Bcfg2.Cache.Cache.expire` method; -if called with no arguments, it expires all cached data; if called -with one string argument, it expires cached data for the named client. +you'll be interested in the :func:`Bcfg2.Server.Cache.Cache.expire` +method; if called with no arguments, it expires all cached data; if +called with one string argument, it expires cached data for the named +client. It's important, therefore, that your Connector plugin can either track -when changes are made to the group membership it reports, and expire -cached data appropriately when in ``cautious`` or ``aggressive`` mode; -or prudently flag an incompatibility with those two modes. +when changes are made to the data or group membership it reports, and +expire cached data appropriately when in ``cautious`` or ``aggressive`` +mode; or prudently flag an incompatibility with those two modes. For examples, see: * :func:`Bcfg2.Server.Plugins.Probes.ReceiveData` takes a copy of the groups that have been assigned to a client by - :ref:`server-plugins-probes-index`, and if that data changes when - new probe data is received, it invalidates the cache for that - client. + :ref:`server-plugins-probes`, and if that data changes when new probe + data is received, it invalidates the cache for that client. * :func:`Bcfg2.Server.Plugins.GroupPatterns.Index` expires the entire cache whenever a FAM event is received for the :ref:`server-plugins-grouping-grouppatterns` config file. @@ -163,18 +167,18 @@ .. versionadded:: 1.3.0 Statistics can and should track execution time statistics using -:mod:`Bcfg2.Statistics`. This module tracks execution time for the +:mod:`Bcfg2.Server.Statistics`. This module tracks execution time for the server core and for plugins, and exposes that data via ``bcfg2-admin perf``. This data can be invaluable for locating bottlenecks or other performance issues. 
The simplest way to track statistics is to use the -:func:`Bcfg2.Server.Plugin.helpers.track_statistics` decorator to +:func:`Bcfg2.Server.Statistics.track_statistics` decorator to decorate functions that you would like to track execution times for: .. code-block:: python - from Bcfg2.Server.Plugin import track_statistics + from Bcfg2.Server.Statistics import track_statistics @track_statistics() def do_something(self, ...): @@ -184,13 +188,13 @@ More granular usage is possible by using :func:`time.time` to manually determine the execution time of a given event and calling -:func:`Bcfg2.Statistics.Statistics.add_value` with an appropriate +:func:`Bcfg2.Server.Statistics.Statistics.add_value` with an appropriate statistic name. -Bcfg2.Statistics -^^^^^^^^^^^^^^^^ +Bcfg2.Server.Statistics +^^^^^^^^^^^^^^^^^^^^^^^ -.. automodule:: Bcfg2.Statistics +.. automodule:: Bcfg2.Server.Statistics Plugin Helper Classes --------------------- diff -Nru bcfg2-1.3.5/doc/development/setup.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/setup.txt --- bcfg2-1.3.5/doc/development/setup.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/setup.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _development-setup: @@ -12,6 +13,11 @@ git clone https://github.com/Bcfg2/bcfg2.git +.. note:: + + The URL above is read-only. If you are planning on submitting patches + upstream, please see :ref:`development-submitting-patches`. + * Add :file:`bcfg2/src/sbin` to your :envvar:`PATH` environment variable * Add :file:`bcfg2/src/lib` to your :envvar:`PYTHONPATH` environment variable diff -Nru bcfg2-1.3.5/doc/development/submitting-patches.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/submitting-patches.txt --- bcfg2-1.3.5/doc/development/submitting-patches.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/development/submitting-patches.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,144 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +.. _development-submitting-patches: + +================== +Submitting Patches +================== + +The purpose of this document is to assist those who may be less familiar +with git in submitting patches upstream. While git is powerful, it can +be somewhat confusing to those who don't use it regularly (and even +those who do). + +.. note:: + + We prefer more in-depth commit messages than those + given below which are purely for brevity in this guide. See + http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html + for more about creating proper git commit messages. + +.. _Github: https://github.com/ + +`Github`_ +========= + +These steps outline one way of submitting patches via `Github`_. First, +you will want to `fork `_ the +upstream Bcfg2 repository. + +Create a local branch +--------------------- + +Once you have forked the upstream repository, you should clone a local +copy (where is your github username). + +:: + + git clone git@github.com:/bcfg2.git + +Create a local feature/bugfix branch off the appropriate upstream +branch. For example, let's say we want to submit a bugfix for +:program:`bcfg2-info` against the 1.2.x series. We can create a +``fix-bcfg2-info`` branch which is a copy of the ``maint-1.2`` branch. 
+ +:: + + git branch fix-bcfg2-info maint-1.2 + git checkout fix-bcfg2-info + +Commit changes to your local branch +----------------------------------- + +Next, make whatever changes need to be made and commit them to the +``fix-bcfg2-info`` branch. + +:: + + git add src/sbin/bcfg2-info + git commit -m "Fix bcfg2-info bug" + +Now you need to push your ``fix-bcfg2-info`` branch to github. + +:: + + git push origin fix-bcfg2-info + +Submit pull request +------------------- + +Next, submit a pull request against the proper branch (in this case, +https://github.com/username/bcfg2/pull/new/fix-bcfg2-info -- again, +username is your github username). At the top of the pull request, you can +edit the upstream branch you are targeting so that you create the pull +request against the proper upstream branch (in this case, ``maint-1.2``). + +All that's left to do is to write up a description of your pull request +and click **Send pull request**. Since your local branch is specific to +this fix, you can add additional commits if needed and push them. They +will automatically be added to the pull request. + +Remove local branch +------------------- + +Once we have merged your pull request, you can safely delete your local +feature/bugfix branch. To do so, you must first check out a different branch. + +:: + + git checkout master # switch to a different branch + git branch -d fix-bcfg2-info # delete your local copy of fix-bcfg2-info + git push origin :fix-bcfg2-info # delete fix-bcfg2-info from github + +Mailing List +============ + +The following lists the steps needed to use git's facilities for +emailing patches to the mailing list. + +Commit changes to your local clone +---------------------------------- + +For example, let's say we want to fix a bug in :program:`bcfg2-info` +for the 1.2.x series. 
To make this -easier, our unit tests adhere to several design considerations: +easier, our unit tests adhere to several design considerations. Inherit Tests ------------- diff -Nru bcfg2-1.3.5/doc/exts/xmlschema.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/exts/xmlschema.py --- bcfg2-1.3.5/doc/exts/xmlschema.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/exts/xmlschema.py 2017-01-10 19:18:17.000000000 +0000 @@ -784,7 +784,7 @@ def clear_doc(self, docname): to_del = [] for dtype in self.types.keys(): - for key, (doc, _) in self.data[dtype].iteritems(): + for key, (doc, _) in self.data[dtype].items(): if doc == docname: to_del.append((dtype, key)) for dtype, key in to_del: @@ -803,7 +803,7 @@ def get_objects(self): for dtype in self.types.keys(): - for name, (docname, tgtid) in self.data[dtype].iteritems(): + for name, (docname, tgtid) in self.data[dtype].items(): yield (name, name, dtype, docname, tgtid, self.object_types[dtype].attrs['searchprio']) diff -Nru bcfg2-1.3.5/doc/getting_started/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/getting_started/index.txt --- bcfg2-1.3.5/doc/getting_started/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/getting_started/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -21,7 +21,7 @@ We recommend running the server on a Linux machine for ease of deployment due to the availability of packages for the dependencies. -First, you need to download and install Bcfg2. The section +First, you need to download and install Bcfg2. The section :ref:`installation-index` in this manual describes the steps to take. To start, you will need to install the server on one machine and the client on one or more machines. Yes, your server can also be a client @@ -71,7 +71,7 @@ should be something similar to:: Loaded tool drivers: - Chkconfig POSIX YUMng + Chkconfig POSIX YUM Phase: initial Correct entries: 0 @@ -108,7 +108,7 @@ that looks like:: bcfg-server:~ # ls /var/lib/bcfg2 - Base/ Bundler/ Cfg/ Metadata/ Pkgmgr/ Rules/ SSHbase/ etc/ + Bundler/ Cfg/ Metadata/ Pkgmgr/ Rules/ SSHbase/ etc/ The place to start is the Metadata directory, which contains two files: ``clients.xml`` and ``groups.xml``. Your current @@ -169,7 +169,7 @@ .. code-block:: xml - + @@ -223,7 +223,7 @@ Done! Now we just have 242 (or more) entries to take care of! -:ref:`server-plugins-structures-bundler-index` is a +:ref:`server-plugins-structures-bundler` is a relatively easy directory to populate. You can find many samples of Bundles in the :ref:`Bundler Example Repository `, many of which can @@ -255,6 +255,10 @@ Platform-specific Quickstart Notes ================================== -* :ref:`appendix-guides-centos` -* :ref:`appendix-guides-ubuntu` -* :ref:`getting_started-macosx-notes` +.. toctree:: + :maxdepth: 1 + + CentOS + Ubuntu + Gentoo + Mac OS X diff -Nru bcfg2-1.3.5/doc/help/troubleshooting.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/help/troubleshooting.txt --- bcfg2-1.3.5/doc/help/troubleshooting.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/help/troubleshooting.txt 2017-01-10 19:18:17.000000000 +0000 @@ -69,13 +69,13 @@ If the bcfg2 server is not reflecting recent changes, try restarting the bcfg2-server process ============================================================================================= -If this fixes the problem, it is either a bug in the -underlying file monitoring system (fam or gamin) or a bug in -Bcfg2's file monitoring code. 
In either case, file a `ticket +If this fixes the problem, it is either a bug in the underlying file +monitoring system (inotify or gamin) or a bug in Bcfg2's file +monitoring code. In either case, file a `ticket `_ in the tracking system. In the ticket, include: -* filesystem monitoring system (fam or gamin) +* filesystem monitoring system (inotify or gamin) * kernel version (if on linux) * if any messages of the form "Handled N events in M seconds" appeared between the modification event and the client @@ -259,8 +259,7 @@ :ref:`server-info` file for this entry. .. [s11] Verify that you have the proper prefix set in bcfg2.conf. .. [s12] Ensure that the client is a member of all the appropriate - :ref:`server-plugins-generators-packages-magic-groups` as - well as any additional groups you may have defined in your + groups you may have defined in your :ref:`server-plugins-generators-packages` configuration. FAQs diff -Nru bcfg2-1.3.5/doc/installation/distributions.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/installation/distributions.txt --- bcfg2-1.3.5/doc/installation/distributions.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/installation/distributions.txt 2017-01-10 19:18:17.000000000 +0000 @@ -22,23 +22,21 @@ .. _Alpine Linux: http://www.alpinelinux.org/ .. _testing: http://git.alpinelinux.org/cgit/aports/tree/testing/bcfg2 -ArchLinux -========= +Arch Linux +========== Packages for `Arch Linux`_ are available in the Arch User Repository (AUR_). -Just use `pacman` to perform the installation :: - - pacman -S bcfg2 bcfg2-server +The bcfg2 package includes bcfg2-server. .. _Arch Linux: http://www.archlinux.org/ -.. _AUR: http://aur.archlinux.org/packages.php?ID=20979 +.. _AUR: http://aur.archlinux.org/packages/bcfg2 Debian ====== -Packages of Bcfg2 are available for Debian Lenny, Debian Squeeze, and -Debian Sid. The fastest way to get Bcfg2 onto your Debian system -is to use ``apt-get`` or ``aptitude``. :: +Packages of Bcfg2 are available for all current versions of Debian. +The fastest way to get Bcfg2 onto your Debian system is to use ``apt-get`` +or ``aptitude``. 
:: sudo aptitude install bcfg2 bcfg2-server diff -Nru bcfg2-1.3.5/doc/installation/prerequisites.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/installation/prerequisites.txt --- bcfg2-1.3.5/doc/installation/prerequisites.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/installation/prerequisites.txt 2017-01-10 19:18:17.000000000 +0000 @@ -25,7 +25,7 @@ +----------------------------+------------------------+--------------------------------+ | lxml or elementtree [#f2]_ | Any | lxml: libxml2, libxslt, python | +----------------------------+------------------------+--------------------------------+ -| python-apt [#f3]_ | Any | python | +| python-apt [#f3]_ | 0.7.91 and greater | python | +----------------------------+------------------------+--------------------------------+ | debsums (if APT tool | Any | | | driver is used) | | | @@ -60,6 +60,8 @@ +-------------------------------+----------+--------------------------------+ | python-setuptools | Any | | +-------------------------------+----------+--------------------------------+ +| python-genshi | Any | | ++-------------------------------+----------+--------------------------------+ Bcfg2 Reporting --------------- @@ -70,7 +72,7 @@ +-------------------------------+----------+--------------------------------+ | Software | Version | Requires | +===============================+==========+================================+ -| django | 1.2.0+ | | +| django | 1.3.0+ | | +-------------------------------+----------+--------------------------------+ | south | 0.7.5+ | | +-------------------------------+----------+--------------------------------+ diff -Nru bcfg2-1.3.5/doc/introduction/os-support.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/introduction/os-support.txt --- bcfg2-1.3.5/doc/introduction/os-support.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/introduction/os-support.txt 2017-01-10 19:18:17.000000000 +0000 @@ -17,13 +17,13 @@ .. _OpenSolaris: http://opensolaris.org/ .. _Solaris: http://www.sun.com/software/solaris/ -* Many `GNU/Linux`_ distributions, including `Archlinux`_, `Blag`_, `CentOS`_, +* Many `GNU/Linux`_ distributions, including `Arch Linux`_, `Blag`_, `CentOS`_, `Debian`_, `Fedora`_, `Gentoo`_, `gNewSense`_, `Mandriva`_, `OpenSUSE`_, `Red Hat/RHEL`_, `Scientific Linux`_, `SuSE/SLES`_, `Trisquel`_, and `Ubuntu`_. .. _GNU/Linux: http://www.gnu.org/gnu/Linux-and-gnu.html -.. _Archlinux: http://www.archlinux.org +.. _Arch Linux: http://www.archlinux.org .. _Blag: http://www.blagblagblag.org/ .. _CentOS: http://www.centos.org/ .. _Debian: http://www.debian.org/ diff -Nru bcfg2-1.3.5/doc/man/bcfg2-admin.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/man/bcfg2-admin.txt --- bcfg2-1.3.5/doc/man/bcfg2-admin.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/man/bcfg2-admin.txt 2017-01-10 19:18:17.000000000 +0000 @@ -38,9 +38,6 @@ backup Create an archive of the entire Bcfg2 repository. -bundle *action* - Display details about the available bundles (See BUNDLE OPTIONS - below). client *action* *client* [attribute=value] Add, edit, or remove clients entries in metadata (See CLIENT OPTIONS below). @@ -48,8 +45,12 @@ Compare two client configurations. Can be used to verify consistent behavior between releases. Determine differences between files or directories (See COMPARE OPTIONS below). +dbshell + Call the Django 'dbshell' command on the configured database. init Initialize a new repository (interactive). 
+initreports + Initialize the Reporting database. minestruct *client* [-f xml-file] [-g groups] Build structure entries based on client statistics extra entries (See MINESTRUCT OPTIONS below). @@ -58,34 +59,31 @@ pull *client* *entry-type* *entry-name* Install configuration information into repo based on client bad entries (See PULL OPTIONS below). -reports [init|load_stats|purge|scrub|update] - Interact with the dynamic reporting system (See REPORTS OPTIONS - below). -snapshots [init|dump|query|reports] - Interact with the Snapshots database (See SNAPSHOTS OPTIONS below). +purgereports + Purge historic and expired data from the Reporting database +reportssqlall + Call the Django 'shell' command on the Reporting database. +reportsstats + Print Reporting database statistics. +scrubreports + Scrub the Reporting database for duplicate reasons and orphaned + entries. +shell + Call the Django 'shell' command on the configured database. syncdb Sync the Django ORM with the configured database. tidy Remove unused files from repository. +updatereports + Apply database schema updates to the Reporting database. +validatedb + Call the Django 'validate' command on the configured database. viz [-H] [-b] [-k] [-o png-file] Create a graphviz diagram of client, group and bundle information (See VIZ OPTIONS below). xcmd Provides a XML-RPC Command Interface to the bcfg2-server. -BUNDLE OPTIONS -++++++++++++++ - -mode - One of the following. - - *list-xml* - List all available xml bundles - *list-genshi* - List all available genshi bundles - *show* - Interactive dialog to get details about the available bundles - CLIENT OPTIONS ++++++++++++++ @@ -110,11 +108,20 @@ COMPARE OPTIONS +++++++++++++++ +-d *N*, --diff-lines *N* + Show only N lines of a diff + +-c, --color + Show colors even if not ryn from a TTY + +-q, --quiet + Only show that entries differ, not how they differ + old - Specify the location of the old configuration file. + Specify the location of the old configuration(s). new - Specify the location of the new configuration file. + Specify the location of the new configuration(s). MINESTRUCT OPTIONS ++++++++++++++++++ @@ -140,51 +147,24 @@ entry name Specify the name of the entry to pull. -REPORTS OPTIONS -+++++++++++++++ - -load_stats [-s] [-c] [-03] - Load statistics data. - -purge [--client [n]] [--days [n]] [--expired] - Purge historic and expired data. - -scrub - Scrub the database for duplicate reasons and orphaned entries. - -update - Apply any updates to the reporting database. - -SNAPSHOTS OPTIONS -+++++++++++++++++ - -init - Initialize the snapshots database. - -query - Query the snapshots database. - -dump - Dump some of the contents of the snapshots database. - -reports [-a] [-b] [-e] [--date=MM-DD-YYYY] - Generate reports for clients in the snapshots database. - VIZ OPTIONS +++++++++++ --H +-H, --includehosts Include hosts in diagram. --b +-b, --includebundles Include bundles in diagram. --o +-o *outfile*, --outfile *outfile* Write to outfile file instead of stdout. --k +-k, --includekey Add a shape/color key. 
+-c *hostname*, --only-client *hostname* + Only show groups and bundles for the named client + See Also -------- diff -Nru bcfg2-1.3.5/doc/man/bcfg2.conf.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/man/bcfg2.conf.txt --- bcfg2-1.3.5/doc/man/bcfg2.conf.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/man/bcfg2.conf.txt 2017-01-10 19:18:17.000000000 +0000 @@ -43,14 +43,13 @@ inotify gamin - fam pseudo fam_blocking Whether the server should block at startup until the file monitor backend has processed all events. This can cause a slower startup, but ensure that all files are recognized before the first client - is handled. + is handled. Defaults to True. ignore_files A comma-separated list of globs that should be ignored by the file @@ -76,24 +75,22 @@ A comma-delimited list of enabled server plugins. Currently available plugins are:: - Account - Base + ACL Bundler Bzr Cfg Cvs Darcs - DBStats Decisions + Defaults Deps - Editor FileProbes Fossil Git + GroupLogic GroupPatterns Guppy Hg - Hostbase Ldap Metadata NagiosGen @@ -108,14 +105,9 @@ Rules SEModules ServiceCompat - Snapshots SSHbase - SSLCA - Statistics Svn - TCheetah TemplateHelper - TGenshi Trigger Descriptions of each plugin can be found in their respective @@ -158,25 +150,10 @@ This section has a listing of all the plugins currently provided with Bcfg2. -Account Plugin -++++++++++++++ - -The account plugin manages authentication data, including the following. - -* ``/etc/passwd`` -* ``/etc/group`` -* ``/etc/security/limits.conf`` -* ``/etc/sudoers`` -* ``/root/.ssh/authorized_keys`` - -Base Plugin -+++++++++++ +ACL Plugin +++++++++++ -The Base plugin is a structure plugin that provides the ability -to add lists of unrelated entries into client configuration entry -inventories. Base works much like Bundler in its file format. This -structure plugin is good for the pile of independent configs needed for -most actual systems. +The ACL plugin controls which hosts can make which XML-RPC calls. Bundler Plugin ++++++++++++++ @@ -203,25 +180,20 @@ directory tree modeled off of the directory tree on your client machines. -Cvs Plugin (experimental) -+++++++++++++++++++++++++ +Cvs Plugin +++++++++++ The Cvs plugin allows you to track changes to your Bcfg2 repository using a Concurrent version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -Darcs Plugin (experimental) -+++++++++++++++++++++++++++ +Darcs Plugin +++++++++++++ The Darcs plugin allows you to track changes to your Bcfg2 repository using a Darcs version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -DBStats Plugin -++++++++++++++ - -Direct to database statistics plugin. - Decisions Plugin ++++++++++++++++ @@ -245,13 +217,6 @@ The Deps plugin allows you to make a series of assertions like "Package X requires Package Y (and optionally also Package Z etc.)" -Editor Plugin -+++++++++++++ - -The Editor plugin attempts to allow you to partially manage -configuration for a file. Its use is not recommended and not well -documented. - FileProbes Plugin +++++++++++++++++ @@ -274,6 +239,12 @@ using a Git version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. +GroupLogic Plugin ++++++++++++++++++ + +The GroupLogic plugin lets you flexibly assign group membership with a +Genshi template. 
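As a rough sketch of what such a template can look like (the file
location, ``GroupLogic/groups.xml``, and the ``metadata`` template
variable are assumptions taken from the plugin's own documentation
rather than from this manpage)::

    <GroupLogic xmlns:py="http://genshi.edgewall.org/">
      <!-- put clients that are already in "staging" into "apache24" -->
      <Group py:if="'staging' in metadata.groups" name="apache24"/>
    </GroupLogic>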
+ GroupPatterns Plugin ++++++++++++++++++++ @@ -286,22 +257,17 @@ The Guppy plugin is used to trace memory leaks within the bcfg2-server process using Guppy. -Hg Plugin (experimental) -++++++++++++++++++++++++ +Hg Plugin ++++++++++ The Hg plugin allows you to track changes to your Bcfg2 repository using a Mercurial version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -Hostbase Plugin -+++++++++++++++ - -The Hostbase plugin is an IP management system built on top of Bcfg2. - Ldap Plugin +++++++++++ -The Ldap plugin makes it possible to fetch data from an LDAP directory, +The Ldap plugin makes it possible to fetch data from a LDAP directory, process it and attach it to your metadata. Metadata Plugin @@ -316,8 +282,8 @@ The NagiosGen plugin dynamically generates Nagios configuration files based on Bcfg2 data. -Ohai Plugin (experimental) -++++++++++++++++++++++++++ +Ohai Plugin ++++++++++++ The Ohai plugin is used to detect information about the client operating system. The data is reported back to the server using JSON. @@ -373,10 +339,10 @@ Rules Plugin ++++++++++++ -The Rules plugin provides literal configuration entries that resolve the -abstract configuration entries normally found in the Bundler and Base -plugins. The literal entries in Rules are suitable for consumption by -the appropriate client drivers. +The Rules plugin provides literal configuration entries that resolve +the abstract configuration entries normally found in Bundler. The +literal entries in Rules are suitable for consumption by the +appropriate client drivers. SEModules Plugin ++++++++++++++++ @@ -389,12 +355,6 @@ The ServiceCompat plugin converts service entries for older clients. -Snapshots Plugin -++++++++++++++++ - -The Snapshots plugin stores various aspects of a client’s state when the -client checks in to the server. - SSHbase Plugin ++++++++++++++ @@ -402,17 +362,6 @@ hosts. It also manages the ssh_known_hosts file. It can integrate host keys from other management domains and similarly export its keys. -SSLCA Plugin -++++++++++++ - -The SSLCA plugin is designed to handle creation of SSL privatekeys and -certificates on request. - -Statistics -++++++++++ - -The Statistics plugin is deprecated (see Reporting). - Svn Plugin ++++++++++ @@ -420,20 +369,6 @@ using a Subversion backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -TCheetah Plugin -+++++++++++++++ - -The TCheetah plugin allows you to use the cheetah templating system to -create files. It also allows you to include the results of probes -executed on the client in the created files. - -TGenshi Plugin -++++++++++++++ - -The TGenshi plugin allows you to use the Genshi templating system to -create files. It also allows you to include the results of probes -executed on the client in the created files. - Trigger Plugin ++++++++++++++ @@ -512,7 +447,7 @@ sets the password to use to connect to the server. protocol - Communication protocol to use. Defaults to xmlrpc/ssl. + Communication protocol to use. Defaults to xmlrpc/tlsv1. retries A client-only option. Number of times to retry network @@ -602,6 +537,10 @@ The following options are specified in the **[packages]** section. + backends + Comma separated list of backends for the dependency resolution. + Default is "Yum,Apt,Pac,Pkgng". + resolver Enable dependency resolution. Default is 1 (true). @@ -667,25 +606,12 @@ running in paranoid mode. 
Only the most recent versions of these copies will be kept. -Snapshots options ------------------ - -Specified in the **[snapshots]** section. These options control the -server snapshots functionality. - - driver - sqlite - - database - The name of the database to use for statistics data. - - e.g.: ``$REPOSITORY_DIR/etc/bcfg2.sqlite`` - -SSLCA options -------------- +SSL CA options +-------------- -These options are necessary to configure the SSLCA plugin and can be -found in the **[sslca_default]** section of the configuration file. +These options are necessary to configure the SSL CA feature of the Cfg +plugin and can be found in the **[sslca_default]** section of the +configuration file. config Specifies the location of the openssl configuration file for @@ -710,7 +636,7 @@ control the database connection of the server. engine - The database engine used by the statistics module. One of the + The database engine used by server plugins. One of the following:: postgresql @@ -719,9 +645,9 @@ ado_mssql name - The name of the database to use for statistics data. If + The name of the database to use for server data. If 'database_engine' is set to 'sqlite3' this is a file path to - the sqlite file and defaults to ``$REPOSITORY_DIR/etc/brpt.sqlite``. + the sqlite file and defaults to ``$REPOSITORY_DIR/etc/bcfg2.sqlite``. user User for database connections. Not used for sqlite3. @@ -736,9 +662,42 @@ Port for database connections. Not used for sqlite3. options - Various options for the database connection. The value is - expected as multiple key=value pairs, separated with commas. - The concrete value depends on the database engine. + Various options for the database connection. The value expected + is the literal value of the django OPTIONS setting. + + reporting_engine + The database engine used by the Reporting plugin. One of the + following:: + + postgresql + mysql + sqlite3 + ado_mssql + + If reporting_engine is not specified, the Reporting plugin uses + the same database as the other server plugins. + + reporting_name + The name of the database to use for reporting data. If + 'database_engine' is set to 'sqlite3' this is a file path to + the sqlite file and defaults to + ``$REPOSITORY_DIR/etc/reporting.sqlite``. + + reporting_user + User for reporting database connections. Not used for sqlite3. + + reporting_password + Password for reporting database connections. Not used for sqlite3. + + reporting_host + Host for reporting database connections. Not used for sqlite3. + + reporting_port + Port for reporting database connections. Not used for sqlite3. + + reporting_options + Various options for the database connection. The value expected + is the literal value of the django OPTIONS setting. Reporting options ----------------- @@ -755,6 +714,15 @@ web_debug Turn on Django debugging. + max_children + Maximum number of children for the reporting collector. Use 0 to + disable the limit. (default is 0) + + django_settings + Arbitrary options for the Django installation. The value expected + is a literal python dictionary, that is merged with the already set + django settings. 
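As a hedged illustration of the dictionary-valued settings described
above (the engine, credentials, and option values here are examples
only, not defaults)::

    [database]
    engine = mysql
    name = bcfg2
    user = bcfg2
    password = secret
    options = {"init_command": "SET sql_mode='STRICT_TRANS_TABLES'"}

    [reporting]
    max_children = 2
    django_settings = {"TIME_ZONE": "UTC"}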
+ See Also -------- diff -Nru bcfg2-1.3.5/doc/man/bcfg2-server.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/man/bcfg2-server.txt --- bcfg2-1.3.5/doc/man/bcfg2-server.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/man/bcfg2-server.txt 2017-01-10 19:18:17.000000000 +0000 @@ -11,7 +11,7 @@ **bcfg2-server** [-d] [-v] [-C *configfile*] [-D *pidfile*] [-E *encoding*] [-Q *repo path*] [-S *server url*] [-o *logfile*] [-x -*password*] [--ssl-key=\ *ssl key*] +*password*] [--ssl-key=\ *ssl key*] [--no-fam-blocking] Description ----------- @@ -22,19 +22,20 @@ Options ------- --C configfile Specify alternate bcfg2.conf location. --D pidfile Daemonize, placing the program pid in *pidfile*. --E encoding Specify the encoding of config files. --Q path Specify the path to the server repository. --S server Manually specify the server location (as opposed to - using the value in bcfg2.conf). This should be in - the format "https://server:port" --d Enable debugging output. --v Run in verbose mode. --h Print usage information. ---ssl-key=key Specify the path to the SSL key. +-C configfile Specify alternate bcfg2.conf location. +-D pidfile Daemonize, placing the program pid in *pidfile*. +-E encoding Specify the encoding of config files. +-Q path Specify the path to the server repository. +-S server Manually specify the server location (as opposed to + using the value in bcfg2.conf). This should be in + the format "https://server:port" +-d Enable debugging output. +-v Run in verbose mode. +-h Print usage information. +--ssl-key=key Specify the path to the SSL key. +--no-fam-blocking Synonym for fam_blocking = False in bcfg2.conf See Also -------- -:manpage:`bcfg2(1)`, :manpage:`bcfg2-lint(8)` +:manpage:`bcfg2(1)`, :manpage:`bcfg2-lint(8)`, :manpage:`bcfg2.conf(5)` diff -Nru bcfg2-1.3.5/doc/releases/1.3.6.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/1.3.6.txt --- bcfg2-1.3.5/doc/releases/1.3.6.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/1.3.6.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,43 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +.. _releases-1.3.6: + +1.3.6 +===== + +We are happy to announce the release of Bcfg2 1.3.6. It is available for +download at: + + ftp://ftp.mcs.anl.gov/pub/bcfg + +This is primarily a bugfix release. + +* Fix python 2.4 compatibility +* Fix stale lockfile detection and behavior +* Reporting: fix filter urls +* Fix client protocol option handling +* YUM: Add options to enable and disable Yum plugins +* Packages: add name to sources +* Reporting: better exception handling +* Various interrupt handling fixes +* Fix client decision whitelist/blacklist handling +* Fix database OPTIONS parsing + + This change requires you to set the *options* value of the + ``[database`` section in ``bcfg2.conf`` to the literal value which is + passed through to the django OPTIONS setting. + + https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-OPTIONS + +* SYSV: change instances of simplename to simplefile + + Previous configurations can be updated using the migration tool. + +* Authentication: Reject passwd auth, if authentication is set to "cert" +* Server/Core: drop privileges even if not running as daemon +* Packages/Yum.py: Fix dependency resolution logic +* Handle filesystem secontexts properly for contextless filesystems + +Special thanks to the following contributors for this release: Michael +Fenn, Matt Kemp, Alexander Sulfrian, Jonathan Billings, Ross Smith. 
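To make the OPTIONS change above concrete, a before/after sketch of the
``options`` value in the ``[database]`` section (assuming a MySQL
backend that reads ``/etc/mysql/my.cnf``; the option shown is
illustrative)::

    # 1.3.5 and earlier: comma-separated key=value pairs
    options = read_default_file=/etc/mysql/my.cnf

    # 1.3.6 and later: a literal python dictionary passed to django OPTIONS
    options = {"read_default_file": "/etc/mysql/my.cnf"}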
diff -Nru bcfg2-1.3.5/doc/releases/1.4.0pre1.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/1.4.0pre1.txt --- bcfg2-1.3.5/doc/releases/1.4.0pre1.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/1.4.0pre1.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,182 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +.. _releases-1.4.0pre1: + +1.4.0pre1 +========= + +The first prerelease for Bcfg2 1.4.0 is now available at: + + ftp://ftp.mcs.anl.gov/pub/bcfg + +Bcfg2 1.4.0pre1 is a prerelease, and contains many new features, +including some that are backwards-incompatible with Bcfg2 1.3.x and +earlier. Please read the release notes thoroughly. This is a prerelease +and as such is not likely suitable for general production deployment. +That said, please help us test the release in non- and preproduction +environments. + +backwards-incompatible user-facing changes +------------------------------------------ + +* Completely rewrote option parser + + Many single character options now have long equivalents. Some + subcommand interfaces (``bcfg2-info``, ``bcfg2-admin``) have been + reorganized to some degree. ``bcfg2-reports`` syntax is completely + different. + +* Added new :ref:`server-plugins-misc-acl` plugin + + Default ACLs only allow clients to perform bcfg2 client runs, and only + permit `bcfg2-admin xcmd` calls from localhost. If you want to change + this, you must enable the ACL plugin and configure your own ACLs. + +* Added genshi requirement for the server + +* :ref:`server-plugins-generators-decisions` + + * Switch plugin to use StructFile instead of host- or group-specific XML + files (this allows a single e.g. whitelist.xml file with tags) + + You can convert your existing decisions using + ``tools/upgrade/1.4/migrate_decisions.py``. + + +deprecated features (will be removed in a future release, likely 1.5) +--------------------------------------------------------------------- + +* :ref:`server-plugins-structures-bundler` + + * Deprecated use of an explicit name attribute + + You can convert your existing bundles using + ``tools/upgrade/1.4/convert_bundles.py``. 
+ + * Deprecated :ref:`.genshi bundles + ` (use + .xml bundles and specify the genshi namespace instead) + +* SSLCA + + * Deprecated plugin + * SSLCA functionality has been added to the Cfg plugin: + see :ref:`server-plugins-generators-cfg-ssl-certificates` + +deprecated plugins and features which have been removed +------------------------------------------------------- + +Plugins +^^^^^^^ + +* PostInstall +* TGenshi +* TCheetah +* Account +* Hostbase +* Snapshots +* Statistics +* Editor +* Base + +Client tools +^^^^^^^^^^^^ + +* RPMng +* YUM24 +* YUMng + +Other features +^^^^^^^^^^^^^^ + +* FAM filemonitor +* Removed mode="inherit" support +* Removed support for .cat/.diff files +* Removed support for info/:info files +* Removed "magic" groups (for the Packages plugin) + +other fixes and new features +---------------------------- + +* Added :ref:`inter-bundle dependencies + ` +* Added support for :ref:`independent bundles + ` (replaces + the functionality of Base): +* Added support for wildcard XIncludes +* Add Solaris 11 IPS Package support +* Add bcfg2-report-collector init script to debian package +* Git VCS plugin enhancements +* Removed deprecated plugins + +* :ref:`server-plugins-structures-bundler` + + * Deprecated use of an explicit name attribute + * Deprecated .genshi bundles + * Added path globbing + +* :ref:`server-plugins-grouping-metadata` + + * Allow setting global default authentication type + +* :ref:`server-plugins-generators-packages` + + * Add yum group support to internal resolver + * Change location of plugin-generated APT sources + * Add new Pkgng plugin + * Add ability for per-package recommended flag override + +* :ref:`server-plugins-statistics-reporting` + + * Add support for POSIX user/group entries + * Add support for Django > 1.4 + * Add support for separate reporting database + +* Added option to periodically dump performance stats to logs +* Added option to force server to wait until all FAM events are + processed + +* :ref:`server-plugins-generators-sshbase` + + * Add support for IPv6 addresses in known_hosts file + * Add support for :ref:`encryption of generated ssh keys + ` + +* APT + + * Allow specification of deb-src lines (resolves + http://trac.mcs.anl.gov/projects/bcfg2/ticket/1148) + +* SSLCA + + * Rewrote SSLCA as Cfg handler + + Existing SSLCA installations will need to migrate to the new format + using ``tools/upgrade/1.4/migrate_sslca.py``. + +* :ref:`server-plugins-generators-nagiosgen` + + * Migrate configuration to conf.d + +* :ref:`server-plugins-probes` + + * Rewritten to improve caching + * Add probes.allowed_groups option to restrict group assignments: + see :ref:`server-plugins-probes-dynamic-groups` + + +Thanks +------ + +Special thanks to the following contributors for this release + + * Alexander Sulfrain + * Chris Brinker + * Duncan Hutty + * Jason Kincl + * John Morris + * Matt Schwager + * Michael Fenn + * Stéphane Graber + * Tim Laszlo diff -Nru bcfg2-1.3.5/doc/releases/1.4.0pre2.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/1.4.0pre2.txt --- bcfg2-1.3.5/doc/releases/1.4.0pre2.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/1.4.0pre2.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,56 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +.. 
_releases-1.4.0pre2: + +1.4.0pre2 +========= + +The second prerelease for Bcfg2 1.4.0 is now available at: + + http://bcfg2.org/download/ + +Bcfg2 1.4.0pre2 is a prerelease, and contains many new features, +including some that are backwards-incompatible with Bcfg2 1.3.x and +earlier. Please read the release notes thoroughly. This is a prerelease +and as such is not likely suitable for general production deployment. +That said, please help us test the release in non- and preproduction +environments. + +* NagiosGen: Add bundles to configuration +* HomeBrew: Initial add of plugin +* Rules/Defaults: Add possibility to use name of entry in attributes + +backwards-incompatible user-facing changes +------------------------------------------ + +* Changed default communication protocol to xmlrpc/tlsv1 + +* Diff output from files sent to the Reports plugin from the client will now be + in a unified diff format rather than the previous n-diff format. + + This fixes potentially long client runs when comparing files that have + diverged significantly. + +* The database options in the config (options and reporting_options in database + section) now have to be literal python dictionaries. + + This allows to set arbitrary options with nested settings. + +* The Ldap plugin changed significantly. The configuration interface was + simplified and new configuration options for the number of retries and the + delay in between were added. + + You have to register your ldap queries in the global list, there is no + distinction between LdapQueries and LdapSubQueries anymore, the names of + your queries default to the class names and the Ldap plugin expires + the metadata caches if the config file changes. + +Thanks +------ + +Special thanks to the following contributors for this release + + * Alexander Sulfrain + * Matt Kemp + * Jeremie Banier diff -Nru bcfg2-1.3.5/doc/releases/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/index.txt --- bcfg2-1.3.5/doc/releases/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/releases/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -7,4 +7,10 @@ Release Announcements ===================== -.. include:: 1.3.4.txt +.. toctree:: + + 1.4.0pre2 + 1.4.0pre1 + 1.3.6 + 1.3.5 + 1.3.4 diff -Nru bcfg2-1.3.5/doc/reports/dynamic.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/reports/dynamic.txt --- bcfg2-1.3.5/doc/reports/dynamic.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/reports/dynamic.txt 2017-01-10 19:18:17.000000000 +0000 @@ -25,6 +25,7 @@ users to drill down to find out about a :ref:`specific host `, rather than only having one huge page with too much information. +* Ability to store reporting data separately from other server data. Installation ============ @@ -214,8 +215,8 @@ ^^^^^^^^ If you choose to use a different database, you'll need to edit -``/etc/bcfg2.conf``. These fields should be updated in the [database] -section: +``/etc/bcfg2.conf``. These fields should be updated in the +``[database]`` section: * engine @@ -228,11 +229,27 @@ * host * port (optional) +To store reporting data separately from the main server data, use +the following options: + +* reporting_engine + + * ex: reporting_engine = mysql + * ex: reporting_engine = postgresql_psycopg2 + +* reporting_name +* reporting_user +* reporting_password +* reporting_host +* reporting_port (optional) + .. warning:: If mysql is used as a backend, it is recommended to use InnoDB for the `storage engine `_. 
+Refer to :ref:`server-database` for a full listing of +available options. statistics ^^^^^^^^^^ @@ -253,6 +270,9 @@ * web_prefix: Prefix to be added to Django's MEDIA_URL * file_limit: The maximum size of a diff or binary data to store in the database. +* max_children: Maximum number of children for the reporting + collector. Use 0 to disable the limit and spawn a thread + as soon as a working file is available. .. _dynamic_transports: diff -Nru bcfg2-1.3.5/doc/reports/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/reports/index.txt --- bcfg2-1.3.5/doc/reports/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/reports/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -24,5 +24,4 @@ .. toctree:: :maxdepth: 2 - static dynamic diff -Nru bcfg2-1.3.5/doc/reports/static.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/reports/static.txt --- bcfg2-1.3.5/doc/reports/static.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/reports/static.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -.. -*- mode: rst -*- - -.. _reports-static: - -============================= -Bcfg2 Static Reporting System -============================= - -The Bcfg2 reporting system collects and displays information about the -operation of the Bcfg2 client, and the configuration states of target -machines. - -Goals -===== - -The reporting system provides an interface to administrators describing -a few important tasks - -* Client configuration state, particularly aspects that do not match the configuration specification. - Information about bad and extra configuration elements is included. -* Client execution results (a list of configuration elements that were modified) -* Client execution performance data (including operation retry counts, and timings for several critical execution regions) - -This data can be used to understand the current configuration state -of the entire network, the operations performed by the client, how the -configuration changes propagate, and any reconfiguration operations that -have failed. - -Retention Model -=============== - -The current reporting system stores statistics in an XML data store, by -default to ``/etc/statistics.xml``. It retains either one or two -statistic sets per host. If the client has a clean configuration state, -the most recent (clean) record is retained. If the client has a dirty -configuration state, two records are retained. One record is the last -clean record. The other record is the most recent record collected, -detailing the incorrect state. - -This retention model, while non-optimal, does manage to persistently -record most of the data that users would like. - -Setup -===== - -In order to configure your Bcfg2 server for receiving reports, you -will need to list the Statistics plugin in the plugins line of your -``bcfg2.conf``. You will also need a [statistics] section -in your ``bcfg2.conf``. You can find out more about what goes there in the -``bcfg2.conf`` manpage. - -Output -====== - -Several output reports can be generated from the statistics store with -the command line tool ``bcfg2-build-reports``. 
- -* Nodes Digest -* Nodes Individual -* Overview Statistics -* Performance - -The data generated by these reports can be delivered by several -mechanisms: - -* HTML -* Email -* RSS - -Shortcomings and Planned Enhancements -===================================== - -When designing the current reporting system, we were overly concerned with -the potential explosion in data size over time. In order to address this, -we opted to use the retention scheme described above. This approach has -several shortcomings: - -* A comprehensive list of reconfiguration operations (with associated - timestamps) isn't retained -* Client results for any given day (except the last one) aren't uniformly - retained. This means that inter-client analysis is difficult, if - not impossible - -We plan to move to a database backend to address the dataset size -problem and start retaining all information. The move to a SQL backend -will allow many more types of queries to be efficiently processed. It -will also make on-demand reports simpler. - -Other sorts of information would also be useful to track. We plan to -add the ability to tag a particular configuration element as security -related, and include this in reports. This will aid in the effective -prioritization of manual and failed reconfiguration tasks. - -Capability Goals (posed as questions) -------------------------------------- - -* What machines have not yet applied critical updates? -* How long did critical updates take to be applied? -* What configuration did machine X have on a particular date? -* When did machine X perform configuration update Y? diff -Nru bcfg2-1.3.5/doc/server/acl.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/acl.txt --- bcfg2-1.3.5/doc/server/acl.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/acl.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,41 @@ +.. -*- mode: rst -*- + +.. _server-access-control: + +================ + Access Control +================ + +.. versionadded:: 1.4.0 + +Bcfg2 exposes various functions via XML-RPC calls. Some of these are +relatively benign (e.g., the calls necessary to generate a client +configuration) while others can be used to inspect potentially private +data on the server or very easily mount a denial of service attack. +As a result, access control lists to limit exposure of these calls is +built in. There are two possible ACL methods: built-in, and the +:ref:`server-plugins-misc-acl` plugin. + +The built-in approach simply applies a restrictive default ACL that +lets ``localhost`` perform all XML-RPC calls, and restricts all other +machines to only the calls necessary to run the Bcfg2 client. +Specifically: + +* If the remote client is ``127.0.0.1``, the call is allowed. Note + that, depending on where your Bcfg2 server listens and how it + communicates with itself, it likely will not identify to itself as + ``localhost``. +* If the remote client is not ``127.0.0.1`` and the call is any of the + ``set_debug`` or ``toggle_debug`` methods (including + ``[toggle|set]_core_debug``), it is rejected. +* If the remote client is not ``127.0.0.1`` and the call is + ``get_statistics`` (used by ``bcfg2-admin perf``), it is rejected. +* If the remote client is not ``127.0.0.1`` and the call includes a + ``.`` -- i.e., it is dispatched to any plugin, such as + ``Packages.Refresh`` -- then it is rejected. +* Otherwise, the call is allowed. 
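A concrete illustration of that default policy, using the
``bcfg2-admin xcmd`` command-line interface to the server's XML-RPC
methods::

    # Run on the Bcfg2 server itself (seen as 127.0.0.1): allowed
    bcfg2-admin xcmd Packages.Refresh

    # The same call issued from any other machine is rejected, because
    # plugin-dispatched calls (anything containing a ".") are limited
    # to localhost by the built-in ACL.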
+ +The built-in ACL is *only* intended to ensure that Bcfg2 is secure by +default; it will not be sufficient in many (or even most) cases. In +these cases, it's recommended that you use the +:ref:`server-plugins-misc-acl` plugin. diff -Nru bcfg2-1.3.5/doc/server/admin/bundle.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/bundle.txt --- bcfg2-1.3.5/doc/server/admin/bundle.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/bundle.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-admin-bundle: - -bundle -====== - -For a list of all available xml bundles use ``list-xml``. ``list-genshi`` -will list all available genshi bundles.:: - -.. code-block:: sh - - # bcfg2-admin bundles list-xml - # bcfg2-admin bundles list-genshi - -``show`` provides an interactive dialog to get details about the available -bundles.:: - -.. code-block:: sh - - # bcfg2-admin bundles show - Available bundles (Number of bundles: 4) - ---------------------------------------- - [0] motd.xml - [1] snmpd.xml - [2] bcfg2.xml - [3] ntp.xml - Enter the line number of a bundle for details: 3 - Details for the "ntp" bundle: - Package: xntp - Path: /etc/sysconfig/xntp - Path: /etc/sysconfig/clock - Path: /etc/ntp.conf - Service: xntpd diff -Nru bcfg2-1.3.5/doc/server/admin/compare.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/compare.txt --- bcfg2-1.3.5/doc/server/admin/compare.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/compare.txt 2017-01-10 19:18:17.000000000 +0000 @@ -6,11 +6,10 @@ ======= Determine differences between files or directories of client -specification instances.:: +specification instances:: bcfg2-admin compare -If you want to compare two directories recursively then use ``-r`` as an -option. :: +Or:: - bcfg2-admin compare -r + bcfg2-admin compare diff -Nru bcfg2-1.3.5/doc/server/admin/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/index.txt --- bcfg2-1.3.5/doc/server/admin/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -16,14 +16,11 @@ :maxdepth: 1 backup - bundle client compare init minestruct perf pull - snapshots - tidy viz xcmd diff -Nru bcfg2-1.3.5/doc/server/admin/init.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/init.txt --- bcfg2-1.3.5/doc/server/admin/init.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/init.txt 2017-01-10 19:18:17.000000000 +0000 @@ -36,7 +36,6 @@ A toplevel repository structure was created under the provided path. :: /var/lib/bcfg2 - |-- Base |-- Bundler |-- Cfg |-- etc diff -Nru bcfg2-1.3.5/doc/server/admin/snapshots.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/snapshots.txt --- bcfg2-1.3.5/doc/server/admin/snapshots.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/snapshots.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-admin-snapshots: - -snapshots -========= - -Interact with the Snapshots system. diff -Nru bcfg2-1.3.5/doc/server/admin/tidy.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/tidy.txt --- bcfg2-1.3.5/doc/server/admin/tidy.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/admin/tidy.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -.. 
-*- mode: rst -*- - -.. _server-admin-tidy: - -tidy -==== - -Clean up useless files in the repo. diff -Nru bcfg2-1.3.5/doc/server/caching.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/caching.txt --- bcfg2-1.3.5/doc/server/caching.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/caching.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _server-caching: @@ -13,7 +14,7 @@ Caching (or, rather, cache expiration) is always a difficult problem, but it's particularly vexing in Bcfg2 due to the number of different -data sources incorporated. In 1.3.0, we introduce some limited +data sources incorporated. In 1.3.0, we introduced some limited caching of client metadata objects. Since a client metadata object can be generated anywhere from 7 to dozens of times per client run (depending on your templates), and since client metadata generation @@ -42,15 +43,15 @@ biggest speed boost. ``off`` will never result in stale data, but it gives no speed boost. -In addition to the :ref:`server-plugins-grouping-metadata` plugin, -Bcfg2 includes three plugins that can set additional groups, and thus -may affect the caching behavior. They are -:ref:`server-plugins-grouping-grouppatterns`, -:ref:`server-plugins-probes-index`, and +In addition to the :ref:`server-plugins-grouping-metadata` +plugin, Bcfg2 includes three plugins that can set additional +groups, and thus may affect the caching behavior. They +are :ref:`server-plugins-grouping-grouppatterns`, +:ref:`server-plugins-probes`, and :ref:`server-plugins-connectors-puppetenc`. All of those plugins -**except** for PuppetENC fully support all caching levels. PuppetENC -is incompatible with ``aggressive``, and may result in some stale data -with ``cautious``. +**except** for PuppetENC fully support all caching levels. PuppetENC is +incompatible with ``aggressive``, and may result in some stale data with +``cautious``. If you are not using the PuppetENC plugin, and do not have any custom plugins that provide additional groups, then all four modes should be diff -Nru bcfg2-1.3.5/doc/server/configurationentries.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/configurationentries.txt --- bcfg2-1.3.5/doc/server/configurationentries.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/configurationentries.txt 2017-01-10 19:18:17.000000000 +0000 @@ -28,7 +28,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/configuration.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/configuration.txt --- bcfg2-1.3.5/doc/server/configuration.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/configuration.txt 2017-01-10 19:18:17.000000000 +0000 @@ -55,9 +55,7 @@ plugin. If you depend on this capability (e.g., if your specification is stored in a VCS and checked out onto the Bcfg2 server by a script running as the ``bcfg2`` user), then you would want to ``chown`` and -``chmod`` ``/var/lib/bcfg2`` rather than ``/var/lib/bcfg2/*``. Note -also that the recursive ``chmod`` will change permissions on any files -that are using ``mode="inherit"`` in :ref:`server-info`. +``chmod`` ``/var/lib/bcfg2`` rather than ``/var/lib/bcfg2/*``. 
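A minimal sketch of that whole-directory variant (assuming the server
runs as a ``bcfg2`` user and group; adjust the names and modes to your
installation)::

    chown -R bcfg2:bcfg2 /var/lib/bcfg2
    chmod -R u+rwX,go-rwx /var/lib/bcfg2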
The Bcfg2 server also needs to be able to read its SSL certificate, key and the SSL CA certificate: diff -Nru bcfg2-1.3.5/doc/server/database.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/database.txt --- bcfg2-1.3.5/doc/server/database.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/database.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _server-database: @@ -9,21 +10,38 @@ .. versionadded:: 1.3.0 Several Bcfg2 plugins, including -:ref:`server-plugins-grouping-metadata`, -:ref:`server-plugins-probes-index`, and -:ref:`server-plugins-statistics-reporting`, can connect use a -relational database to store data. They use the global database -settings in ``bcfg2.conf``, described in this document, to connect. +:ref:`server-plugins-grouping-metadata`, :ref:`server-plugins-probes`, and +:ref:`server-plugins-statistics-reporting`, can connect use a relational +database to store data. They use the global database settings in +``bcfg2.conf``, described in this document, to connect. .. note:: Although SQLite is supported as a database, it may cause - significant thread contention (and a performance penalty) if you - use SQLite with :ref:`server-plugins-grouping-metadata` or - :ref:`server-plugins-probes-index`. If you are using the - database-backed features of either of those plugins, it's - recommended that you use a higher performance database backend. + significant thread contention (and a performance penalty) if + you use SQLite with :ref:`server-plugins-grouping-metadata` or + :ref:`server-plugins-probes`. If you are using the database-backed + features of either of those plugins, it's recommended that you use + a higher performance database backend. + + +Separate Reporting Database +=========================== + +.. versionadded:: 1.4.0 + +Bcfg2 supports storing the data generated by the +:ref:`server-plugins-statistics-reporting` in a separate +database from the data generated by the other plugins (e.g. +:ref:`server-plugins-grouping-metadata` and :ref:`server-plugins-probes`). +To activate this support, set the ``reporting_engine``, +``reporting_name``, ``reporting_user``, etc. options in the +``[database]`` section of the config file. The valid values for the +``reporting_*`` options are the same as for the standard database +options. See :ref:`server-database-configuration-options` for a full +listing. +.. _server-database-configuration-options: Configuration Options ===================== @@ -31,29 +49,47 @@ All of the following options should go in the ``[database]`` section of ``/etc/bcfg2.conf``. -+-------------+------------------------------------------------------------+-------------------------------+ -| Option name | Description | Default | -+=============+============================================================+===============================+ -| engine | The name of the Django database backend to use. 
See | "sqlite3" | -| | https://docs.djangoproject.com/en/dev/ref/settings/#engine | | -| | for available options (note that django.db.backends is not | | -| | included in the engine name) | | -+-------------+------------------------------------------------------------+-------------------------------+ -| name | The name of the database | "/var/lib/bcfg2/bcfg2.sqlite" | -+-------------+------------------------------------------------------------+-------------------------------+ -| user | The user to connect to the database as | None | -+-------------+------------------------------------------------------------+-------------------------------+ -| password | The password to connect to the database with | None | -+-------------+------------------------------------------------------------+-------------------------------+ -| host | The host to connect to | "localhost" | -+-------------+------------------------------------------------------------+-------------------------------+ -| port | The port to connect to | None | -+-------------+------------------------------------------------------------+-------------------------------+ -| options | Extra parameters to use when connecting to the database. | None | -| | Available parameters vary depending on your database | | -| | backend. The parameters are supplied as comma separated | | -| | key=value pairs. | | -+-------------+------------------------------------------------------------+-------------------------------+ ++--------------------+------------------------------------------------------------+---------------------------------------+ +| Option name | Description | Default | ++====================+============================================================+=======================================+ +| engine | The name of the Django database backend to use. See | "sqlite3" | +| | https://docs.djangoproject.com/en/dev/ref/settings/#engine | | +| | for available options (note that django.db.backends is not | | +| | included in the engine name) | | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| name | The name of the database | "/var/lib/bcfg2/etc/bcfg2.sqlite" | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| user | The user to connect to the database as | None | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| password | The password to connect to the database with | None | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| host | The host to connect to | "localhost" | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| port | The port to connect to | None | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| options | Extra parameters to use when connecting to the database. | None | +| | Available parameters vary depending on your database | | +| | backend. The parameters are supplied as the value of the | | +| | django OPTIONS setting. | | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_engine | The name of the Django database backend to use for the | None | +| | reporting database. 
Takes the same values as ``engine``. | | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_name | The name of the reporting database | "/var/lib/bcfg2/etc/reporting.sqlite" | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_user | The user to connect to the reporting database as | None | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_password | The password to connect to the reporting database with | None | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_host | The host to connect to for the reporting database | "localhost" | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_port | The port to connect to for the reporting database | None | ++--------------------+------------------------------------------------------------+---------------------------------------+ +| reporting_options | Extra parameters to use when connecting to the reporting | None | +| | database. Available parameters vary depending on your | | +| | database backend. The parameters are supplied as the | | +| | value of the django OPTIONS setting. | | ++--------------------+------------------------------------------------------------+---------------------------------------+ Database Schema Sync diff -Nru bcfg2-1.3.5/doc/server/encryption.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/encryption.txt --- bcfg2-1.3.5/doc/server/encryption.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/encryption.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _server-encryption: @@ -24,7 +25,7 @@ single Bcfg2 repository with multiple admins who should not necessarily have access to each other's sensitive data. -Two types of data can be encrypted: +Two basic types of data can be encrypted: * :ref:`server-plugins-generators-cfg` files can be encrypted as whole files. See :ref:`server-plugins-generators-cfg-encryption` @@ -51,6 +52,13 @@ amongst different teams, this lets teams collaborate more closely on files and other data. +Other types of data that can be encrypted are: + +* Text content of Path tags in + :ref:`server-plugins-structures-bundler` +* Passphrases in XML description files for generated + :ref:`server-plugins-generators-cfg-sshkeys` + .. _bcfg2-crypt: bcfg2-crypt @@ -203,6 +211,8 @@ openssl list-cipher-algorithms | grep -v ' => ' | \ tr 'A-Z-' 'a-z_' | sort -u +.. _server-encryption-lax-strict: + Lax vs. Strict decryption ------------------------- @@ -215,15 +225,19 @@ decryption in the ``[encryption]`` section of ``bcfg2.conf``:: [encryption] - decrypt = lax + lax_decryption = true This causes a failed decrypt to produce a warning only, not an error. This can be overridden by individual XML files by setting -``decrypt="strict"`` on the top-level tag (or, vice-versa; if strict -is the default an XML file can specify ``decrypt="lax"``. +``lax_decryption="false"`` on the top-level tag (or, vice-versa; if +strict is the default an XML file can specify +``lax_decryption="true"``. + +Note that you could, for instance, set lax decryption by default, and +then disable it on individual files. 
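For example (a hedged sketch; any XML file type that supports
encryption can carry the attribute on its top-level tag), a single
Properties file can insist on strict decryption even when the server
default is lax::

    <Properties lax_decryption="false">
      <!-- encrypted elements go here -->
    </Properties>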
Encryption API ============== -.. automodule:: Bcfg2.Encryption +.. automodule:: Bcfg2.Server.Encryption diff -Nru bcfg2-1.3.5/doc/server/genshi-xml.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/genshi-xml.txt --- bcfg2-1.3.5/doc/server/genshi-xml.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/genshi-xml.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -.. -*- mode: rst -*- - -.. _xml-genshi-reference: - -=============================== - Genshi XML Template Reference -=============================== - -Genshi's XML templating language is used in -:ref:`server-plugins-structures-bundler-index` for templated bundles. -The language is described in depth at `Genshi -`_. The XML schema reference follows. - -Genshi Tags -=========== - -.. xml:group:: genshiElements - :namespace: py - -Genshi Attributes -================= - -.. xml:attributegroup:: genshiAttrs - :namespace: py diff -Nru bcfg2-1.3.5/doc/server/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/index.txt --- bcfg2-1.3.5/doc/server/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -26,11 +26,11 @@ admin/index configurationentries info - snapshots/index bcfg2-info selinux configuration database caching encryption - genshi-xml + xml-common + acl diff -Nru bcfg2-1.3.5/doc/server/info.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/info.txt --- bcfg2-1.3.5/doc/server/info.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/info.txt 2017-01-10 19:18:17.000000000 +0000 @@ -7,8 +7,7 @@ ======== Various file properties for entries served by most generator plugins, -including :ref:`server-plugins-generators-cfg`, -:ref:`server-plugins-generators-sslca`, and +including :ref:`server-plugins-generators-cfg` and :ref:`server-plugins-generators-sshbase`, are controlled through the use of ``info.xml`` files. @@ -53,25 +52,3 @@ See :ref:`server-selinux` for more information on the ``secontext`` attribute and managing SELinux in general. - -:info and info files -==================== - -.. deprecated:: 1.3.0 - -Historically, Bcfg2 also accepted the use of ``:info`` and ``info`` -files, which function the same as ``info.xml``, but are not XML. They -lack the ability to specify different permissions based on client, -group, or path, and cannot be used to specify ACLs, either. - -An example ``:info`` or ``info`` file would look like:: - - owner: www - group: www - mode: 0755 - -All attributes allowed on the ```` tag of an ``info.xml`` file -can be used in an ``:info`` or ``info`` file. - -You should not use more than one ``:info``, ``info``, or ``info.xml`` -file for a single entry. diff -Nru bcfg2-1.3.5/doc/server/plugins/connectors/grouplogic.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/connectors/grouplogic.txt --- bcfg2-1.3.5/doc/server/plugins/connectors/grouplogic.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/connectors/grouplogic.txt 2017-01-10 19:18:17.000000000 +0000 @@ -92,9 +92,9 @@ .. 
code-block:: xml - + - + diff -Nru bcfg2-1.3.5/doc/server/plugins/connectors/properties.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/connectors/properties.txt --- bcfg2-1.3.5/doc/server/plugins/connectors/properties.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/connectors/properties.txt 2017-01-10 19:18:17.000000000 +0000 @@ -120,6 +120,8 @@ [properties] writes_enabled = false +.. _server-plugins-connectors-properties-xml: + XML Property Files ------------------ @@ -266,47 +268,13 @@ .. versionadded:: 1.3.0 You can encrypt selected data in XML Properties files to protect that -data from other people who need access to the repository. See -:ref:`server-encryption-configuration` for details on configuring -encryption passphrases. The data is decrypted transparently -on-the-fly by the server; you never need to decrypt the data in your -templates. Encryption is only supported on XML properties files. - -.. note:: - - This feature is *not* intended to secure the files against a - malicious attacker who has gained access to your Bcfg2 server, as - the encryption passphrases are held in plaintext in - ``bcfg2.conf``. This is only intended to make it easier to use a - single Bcfg2 repository with multiple admins who should not - necessarily have access to each other's sensitive data. - -Properties files are encrypted on a per-element basis; that is, rather -than encrypting the whole file, only the character content of -individual elements is encrypted. This makes it easier to track -changes to the file in a VCS, and also lets unprivileged users work -with the other data in the file. Only character content of an element -can be encrypted; attribute content and XML elements themselves cannot -be encrypted. - -By default, decryption is *strict*; that is, if any element cannot be -decrypted, parsing of the file is aborted. If you wish for parsing to -continue, with unencryptable elements simply skipped, then you can set -decryption to *lax* in one of two ways: - -* Set ``decrypt=lax`` in the ``[encryption]`` section of - ``bcfg2.conf`` to set lax decryption on all files by default; or -* Set the ``decrypt="lax"`` attribute on the top-level ``Properties`` - tag of a Properties file to set lax decryption for a single file. - -Note that you could, for instance, set lax decryption by default, and -then set strict decryption on individual files. - -To encrypt or decrypt a file, use :ref:`bcfg2-crypt`. - -See :ref:`server-encryption` for more details on encryption in Bcfg2 -in general. +data from other people who need access to the repository. The +data is decrypted transparently on-the-fly by the server; you never +need to decrypt the data in your templates. Encryption is only +supported on XML properties files. +See :ref:`server-encryption` for details on encryption in general, and +:ref:`xml-encryption` for details on encryption in XML files. 
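As an illustrative sketch only (the element name, passphrase name, and ciphertext are hypothetical; see :ref:`xml-encryption` for the authoritative syntax), an encrypted element in a Properties file names the passphrase used to encrypt it and carries the ciphertext as its character content:

.. code-block:: xml

    <Properties>
      <dbpassword encrypted="team_a">U2FsdGVkX1...</dbpassword>
    </Properties>

Such elements are normally produced with :ref:`bcfg2-crypt` rather than edited by hand.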
Accessing Properties contents from Genshi Templates =================================================== diff -Nru bcfg2-1.3.5/doc/server/plugins/connectors/templatehelper.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/connectors/templatehelper.txt --- bcfg2-1.3.5/doc/server/plugins/connectors/templatehelper.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/connectors/templatehelper.txt 2017-01-10 19:18:17.000000000 +0000 @@ -31,7 +31,7 @@ Writing Helpers =============== -A helper module is just a Python module with three special conditions: +A helper module is just a Python module with several special conditions: * The filename must end with ``.py`` * The module must have an attribute, ``__export__``, that lists all of @@ -43,28 +43,40 @@ an underscore or double underscore is bad form, and may also produce errors. +Additionally, the module *may* have an attribute, ``__default__``, +that lists all of the symbols that you wish to include by default in +the template namespace. ``name``, ``metadata``, ``source_path``, +``repo``, and ``path`` are reserved names, and should not be included +in ``__default__``. + See ``examples/TemplateHelper`` for examples of helper modules. Usage ===== -Specific helpers can be referred to in -templates as ``metadata.TemplateHelper[]``. That accesses -a HelperModule object will have, as attributes, all symbols listed in +Specific helpers can be referred to in templates as +``metadata.TemplateHelper[]``. That returns a HelperModule +object which will have, as attributes, all symbols listed in ``__export__``. For example, consider this helper module:: __export__ = ["hello"] - + __default__ = ["pining"] + def hello(metadata): return "Hello, %s!" % metadata.hostname + def pining(text): + return "It's pinin' for the %s!" % text + To use this in a Genshi template, we could do:: ${metadata.TemplateHelper['hello'].hello(metadata)} + ${pining("fjords")} The template would produce:: Hello, foo.example.com! + It's pinin' for the fjords! Note that the client metadata object is not passed to a helper module in any magical way; if you want to access the client metadata object diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/account.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/account.txt --- bcfg2-1.3.5/doc/server/plugins/generators/account.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/account.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-generators-account: - -======= -Account -======= - -The account plugin manages authentication data, including - -* ``/etc/passwd`` -* ``/etc/group`` -* ``/etc/security/limits.conf`` -* ``/etc/sudoers`` -* ``/root/.ssh/authorized_keys`` - -User access data is stored in three files in the Account directory: - -* superusers (a list of users who always have root privs) -* rootlist (a list of user:host pairs for scoped root privs) -* useraccess (a list of user:host pairs for login access) - -SSH keys are stored in files named $username.key; these are installed -into root's authorized keys for users in the superusers list as well as -for the pertitent users in the rootlike file (for the current system). - -Authentication data is read in from (static|dyn).(passwd|group) The static -ones are for system local ones, while the dyn. versions are for external -synchronization (from ldap/nis/etc). 
There is also a static.limits.conf -that provides the limits.conf header and any static entries. - -Files in the Account directory: - -``.key`` - - **Format**: The SSH public key for user . - - If the user is in the "rootlike" or "superusers" group, these - keys will be appended to ``/root/.ssh/auth`` - -``useraccess`` - - **Format**: "user:hostname" on each line. - - Describes who may login where (via PAMs - ``/etc/security/limits.conf``). Everybody else will be denied - access.(?) - - **Example**: - - If Alice should be able to access host "foo", Bob should access - "foo" and "bar":: - - alice:foo.example.com - bob:foo.example.com - bob:bar.example.com - -``rootlike`` - - **Format**: "user:hostname" on each line. - - Describes who will be allowed root access where. The user may - login via public key and use sudo. - - **Example**: - - If Chris should be root only on host "foo":: - - chris:foo.example.com - -``superusers`` - - **Format**: usernames, separated by spaces or newlines. (Any whitespace that makes pythons split() happy.) - - Describes who will be allowed root access on all hosts. The user - may login via public key and use sudo. - - **Example**: - - Daniel, Eve and Faith are global admins:: - - daniel eve - faith - -``static.passwd``, ``static.group`` - - **Format**: Lines from ``/etc/passwd`` or ``/etc/group`` - - These entries are appended to the passwd and group files - (in addition to the auto-generated entries from "useraccess", - "rootlike" and "superusers" above) without doing anything else. - -``dyn.passwd``, ``dyn.group`` - - **Format**: Lines from ``/etc/passwd`` or ``/etc/group`` - - Similar to "static.*" above, but for entries that are managed "on - the network" (yp, LDAP, ...), so it is most likely periodically - (re)filled. - -``static.limits.conf`` - - **Format**: Lines from ``/etc/security/limit.conf`` - - These limits will be appended to limits.conf (in addition to - the auto-generated entries from "useraccess", "rootlike" and - "superusers" above). - -``static.sudoers`` - - **Format**: Lines from ``/etc/sudoers`` - - These lines will be appended to to sudoers file (in addition - to the auto-generated entries from "useraccess", "rootlike" and - "superusers" above). diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/cfg.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/cfg.txt --- bcfg2-1.3.5/doc/server/plugins/generators/cfg.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/cfg.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _server-plugins-generators-cfg: @@ -29,8 +30,8 @@ ``/etc/pam.d/sshd``, goes in ``Cfg/etc/pam.d/sshd/sshd``. The reason for the like-name directory is to allow multiple versions of each file to exist, as described below. Note that these files are exact copies of what -will appear on the client machine (except when using Genshi or Cheetah -templating -- see below). +will appear on the client machine (except when using templates -- see +below). Group-Specific Files ==================== @@ -102,9 +103,8 @@ ---------------- Genshi templates allow you to use the `Genshi -`_ templating system. This is similar to -the deprecated :ref:`server-plugins-generators-tgenshi-index` plugin. -Genshi templates should be named with a ``.genshi`` extension, e.g.:: +`_ templating system. 
Genshi templates +should be named with a ``.genshi`` extension, e.g.:: % ls Cfg/etc/motd info.xml motd.genshi @@ -214,9 +214,8 @@ ----------------- Cheetah templates allow you to use the `cheetah templating system -`_. This is similar to -the deprecated :ref:`server-plugins-generators-tcheetah` plugin. -Cheetah templates should be named with a ``.cheetah`` extension, e.g.:: +`_. Cheetah templates should be +named with a ``.cheetah`` extension, e.g.:: % ls Cfg/etc/motd info.xml motd.cheetah @@ -243,6 +242,27 @@ # This is a comment in my template which will be stripped when it's processed through Cheetah \# This comment will appear in the generated config file. +.. _server-plugins-generators-cfg-jinja2: + +Jinja2 Templates +----------------- + +Jinja2 templates allow you to use the `jinja2 templating system +`_. Jinja2 templates should be +named with a ``.jinja2`` extension, e.g.:: + + % ls Cfg/etc/motd + info.xml motd.jinja2 + +Examples +~~~~~~~~ + +.. toctree:: + :glob: + :maxdepth: 1 + + examples/jinja2/* + Inside Templates ---------------- @@ -255,19 +275,17 @@ | | ` | +-------------+--------------------------------------------------------+ | name | The value of the ``name`` attribute as specified in | -| | the Path entry in Bcfg2. If an :ref:`altsrc | -| | ` attribute is used, | -| | then ``name`` will be the value of that attribute. | +| | the Path entry in Bcfg2. | +-------------+--------------------------------------------------------+ | source_path | The path to the template file on the filesystem | +-------------+--------------------------------------------------------+ | repo | The path to the Bcfg2 repository on the filesystem | +-------------+--------------------------------------------------------+ | path | In Genshi templates, ``path`` is a synonym for | -| | ``source_path``. In Cheetah templates, it's a synonym | -| | for ``name``. For this reason, use of ``path`` is | -| | discouraged, and it may be deprecated in a future | -| | release. | +| | ``source_path``. In Cheetah templates and Jinja2 | +| | templates, it's a synonym for ``name``. For this | +| | reason, use of ``path`` is discouraged, and it may be | +| | deprecated in a future release. | +-------------+--------------------------------------------------------+ To access these variables in a Genshi template, you can simply use the @@ -275,6 +293,10 @@ Path to this file: ${name} +Similarly, in a Jinja2 template:: + + Path to this file: {{ name }} + In a Cheetah template, the variables are properties of ``self``, e.g.:: @@ -284,28 +306,19 @@ ------------------------ Templates can be host and group specific as well. Deltas will not be -processed for any Genshi or Cheetah base file. +processed for any Genshi, Cheetah, or Jinja2 base file. .. note:: If you are using templating in combination with host-specific or group-specific files, you will need to ensure that the ``.genshi`` - or ``.cheetah`` extension is at the **end** of the filename. Using the - examples from above for *host.example.com* and group *server* you would - have the following:: + ``.cheetah`` or ``.jinja2`` extension is at the **end** of the filename. + Using the examples from above for *host.example.com* and group *server* + you would have the following:: Cfg/etc/fstab/fstab.H_host.example.com.genshi Cfg/etc/fstab/fstab.G50_server.cheetah -Genshi templates take precence over cheetah templates. For example, if -two files exist named:: - - Cfg/etc/fstab/fstab.genshi - Cfg/etc/fstab/fstab.cheetah - -The Cheetah template is ignored. 
Exploiting this fact is probably a -pretty bad idea in practice. - You can mix Genshi and Cheetah when using different host-specific or group-specific files. For example:: @@ -346,7 +359,7 @@ Cfg/etc/foo.conf/foo.conf.crypt Cfg/etc/foo.conf/foo.conf.G10_foo.crypt -Encrypted Genshi or Cheetah templates can have the extensions in +Encrypted Genshi, Cheetah, and Jinja2 templates can have the extensions in either order, e.g.:: Cfg/etc/foo.conf/foo.conf.crypt.genshi @@ -415,7 +428,7 @@ in general. ``pubkey.xml`` -~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~ ``pubkey.xml`` only ever contains a single line: @@ -475,7 +488,7 @@ .. code-block:: xml - + U2FsdGVkX19xACol83uyPELP94s4CmngD12oU6PLLuE= @@ -563,110 +576,162 @@ Hopefully, the performance concerns can be resolved in a future release and these features can be added. +.. _server-plugins-generators-cfg-ssl-certificates: + +SSL Keys and Certificates +========================= + +Cfg can also create SSL keys and certs on the fly, and store the +generated data in the repo so that subsequent requests do not result +in repeated key/cert recreation. In the event that a new key or cert +is needed, the old file can simply be removed from the +repository, and the next time that host checks in, a new file will be +created. If that file happens to be the key, any dependent +certificates will also be regenerated. + +See also :ref:`appendix-guides-sslca_howto` for a detailed example +that uses the SSL key management feature to automate Bcfg2 certificate +authentication. + +Getting started +--------------- + +In order to use the SSL certificate generation feature, you must first +have at least one CA configured on your system. For details on +setting up your own OpenSSL-based CA, including the suggested +directory layout and configuration directives, see +http://www.openssl.org/docs/apps/ca.html. + +For SSL cert generation to work, the openssl.cnf (or other +configuration file) for that CA must contain full (not relative) +paths. + +#. Add a section to your ``/etc/bcfg2.conf`` called ``sslca_foo``, +   replacing foo with the name you wish to give your CA so you can +   reference it in certificate definitions. (If you only have one CA, +   you can name it ``sslca_default``, and it will be the default CA +   for all other operations.) + +#. Under that section, add a ``config`` option that gives the location +   of the ``openssl.cnf`` file for your CA. + +#. If necessary, add a ``passphrase`` option containing the passphrase +   for the CA's private key. If no passphrase entry exists, it is +   assumed that the private key is stored unencrypted. + +#. Optionally, add a ``chaincert`` option that points to the location +   of your SSL chaining certificate. This is used when preexisting +   certificate hostfiles are found, so that they can be validated and +   only regenerated if they no longer meet the specification. If +   you're using a self-signing CA, this would be the CA cert that you +   generated. If the chain cert is a root CA cert (e.g., if it is a +   self-signing CA), also add an entry ``root_ca = true``. If +   ``chaincert`` is omitted, certificate verification will not be +   performed. + +#. Once all this is done, you should have a section in your +   ``/etc/bcfg2.conf`` that looks similar to the following:: + + [sslca_default] + config = /etc/pki/CA/openssl.cnf + passphrase = youReallyThinkIdShareThis? + chaincert = /etc/pki/CA/chaincert.crt + root_ca = true + +#. You are now ready to create key and certificate definitions.
For + this example we'll assume you've added Path entries for the key, + ``/etc/pki/tls/private/localhost.key``, and the certificate, + ``/etc/pki/tls/certs/localhost.crt`` to a bundle. + +#. Within the ``Cfg/etc/pki/tls/private/localhost.key`` directory, + create a `sslkey.xml`_ file containing the following: + + .. code-block:: xml + + + +#. This will cause the generation of an SSL key when a client requests + that Path. (By default, it will be a 2048-bit RSA key; see + `sslkey.xml`_ for details on how to change the key type and size.) + +#. Similarly, create `sslcert.xml`_ in + ``Cfg/etc/pki/tls/certs/localhost.crt/``, containing the following: + + .. code-block:: xml + + + + + +#. When a client requests the cert path, a certificate will be + generated using the key hostfile at the specified key location, + using the CA matching the ``ca`` attribute. ie. ``ca="foo"`` will + match ``[sslca_default]`` in your ``/etc/bcfg2.conf`` + +The :ref:`Bcfg2 bundle example +` contains entries to +automate the process of setting up a CA. + Configuration ------------- -In addition to ``privkey.xml`` and ``authorized_keys.xml``, described -above, the behavior of the SSH key generation feature can be -influenced by several options in the ``[sshkeys]`` section of -``bcfg2.conf``: +``bcfg2.conf`` +~~~~~~~~~~~~~~ -+----------------+---------------------------------------------------------+-----------------------+------------+ -| Option | Description | Values | Default | -+================+=========================================================+=======================+============+ -| ``passphrase`` | Use the named passphrase to encrypt private keys on the | String | None | -| | filesystem. The passphrase must be defined in the | | | -| | ``[encryption]`` section. See :ref:`server-encryption` | | | -| | for more details on encryption in Bcfg2 in general. | | | -+----------------+---------------------------------------------------------+-----------------------+------------+ -| ``category`` | Generate keys specific to groups in the given category. | String | None | -| | It is best to pick a category that all clients have a | | | -| | group from. | | | -+----------------+---------------------------------------------------------+-----------------------+------------+ +In ``bcfg2.conf``, you must declare your CA(s) in ``[sslca_]`` +sections. At least one is required. Valid options are detailed +below, in `Cfg Configuration`_. -Deltas -====== +Only the ``config`` option is required; i.e., the simplest possible CA +section is:: -.. note:: + [sslca_default] + config = /etc/pki/CA/openssl.cnf + +``sslcert.xml`` +~~~~~~~~~~~~~~~ + +.. xml:schema:: sslca-cert.xsd + :linktotype: + :inlinetypes: CertType + +Example +^^^^^^^ + +.. code-block:: xml + + + test.example.com + + + + + + + + +``sslkey.xml`` +~~~~~~~~~~~~~~ + +.. xml:schema:: sslca-key.xsd + :linktotype: + :inlinetypes: KeyType + +Example +^^^^^^^ + +.. code-block:: xml + + + + + + + + + - In Bcfg2 1.3 and newer, deltas are deprecated. It is recommended - that you use templates instead. The - :ref:`TemplateHelper plugin - ` comes with an example - helper that can be used to include other files easily, a subset of - cat file functionality. ``bcfg2-lint`` checks for deltas and - warns about them. - -.. warning:: - - In Bcfg2 1.3, deltas **do not** work with `SSH key or - authorized_keys generation `_. - -Bcfg2 has finer grained control over how to deliver configuration -files to a host. Let's say we have a Group named file-server. 
Members -of this group need the exact same ``/etc/motd`` as all other hosts except -they need one line added. We could copy motd to ``motd.G01_file-server``, -add the one line to the Group specific version and be done with it, -but we're duplicating data in both files. What happens if we need to -update the motd? We'll need to remember to update both files then. Here's -where deltas come in. A delta is a small change to the base file. There -are two types of deltas: cats and diffs. The cat delta simply adds or -removes lines from the base file. The diff delta is more powerful since -it can take a unified diff and apply it to the base configuration file -to create the specialized file. Diff deltas should be used very sparingly. - -Cat Files ---------- - -Continuing our example for cat files, we would first create a file named -``motd.G01_file-server.cat``. The .cat suffix designates that the file is -a diff. We would then edit that file and add the following line:: - - +This is a file server - -The **+** at the begining of the file tells Bcfg2 that the line should be -appended to end of the file. You can also start a line with **-** to tell -Bcfg2 to remove that exact line wherever it might be in the file. How do -we know what base file Bcfg2 will choose to use to apply a delta? The -same rules apply as before: Bcfg2 will choose the highest priority, -most specific file as the base and then apply deltas in the order of -most specific and then increasing in priority. What does this mean in -real life. Let's say our machine is a web server, mail server, and file -server and we have the following configuration files:: - - motd - motd.G01_web-server - motd.G01_mail-server.cat - motd.G02_file-server.cat - motd.H_bar.example.com - motd.H_foo.example.com.cat - -If our machine isn't *foo.example.com* or *bar.example.com*, but -is a web server, then Bcfg2 would choose ``motd.G01_web-server`` as -the base file. It is the most specific base file for this host. Bcfg2 -would apply the ``motd.G01_mail-server.cat`` delta to the -``motd.G01_web-server`` base file. It is the least specific -delta. Bcfg2 would then apply the ``motd.G02_file-server.cat`` delta -to the result of the delta before it. - -If our machine is *foo.example.com* and a web server, then Bcfg2 would -choose ``motd.G01_web-server`` as the base file. It is the most -specific base file for this host. Bcfg2 would apply the -``motd.H_foo.example.com.cat`` delta to the ``motd.G01_web-server`` -base file. The reason the other deltas aren't applied to -*foo.example.com* is because a **.H_** delta is more specific than a -**.G##_** delta. Bcfg2 applies all the deltas at the most specific -level. - -If our machine is *bar.example.com*, then Bcfg2 would chose -``motd.H_foo.example.com`` as the base file because it is the most -specific base file for this host. Regardless of the groups -*bar.example.com* is a member of, **no cat files** would be applied, -because only cat files as specific or more specific than the base file -are applied. (In other words, if a group-specific base file is -selected, only group- or host-specific cat files can be applied; if a -host-specific base file is selected, only host-specific cat files can -be applied.) .. _server-plugins-generators-cfg-validation: @@ -721,3 +786,56 @@ File permissions for entries handled by Cfg are controlled via the use of :ref:`server-info` files. Note that you **cannot** use both a Permissions entry and a Path entry to handle the same file. + +.. 
_server-plugins-generators-cfg-configuration: + +Cfg Configuration +================= + +The behavior of many bits of the Cfg plugin can be configured in +``bcfg2.conf`` with the following options. + +In addition to ``privkey.xml`` and ``authorized_keys.xml``, described +above, the behavior of the SSH key generation feature can be +influenced by several options in the ``[sshkeys]`` section of +``bcfg2.conf``: + ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| Section | Option | Description | Values | Default | ++=============+================+=========================================================+=======================+============+ +| ``cfg`` | ``passphrase`` | Use the named passphrase to encrypt created data on the | String | None | +| | | filesystem. (E.g., SSH and SSL keys.) The passphrase | | | +| | | must be defined in the ``[encryption]`` section. | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``cfg`` | ``category`` | Generate data (e.g., SSH keys, SSL keys and certs) | String | None | +| | | specific to groups in the given category. It is best to | | | +| | | pick a category that all clients have a group from. | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``cfg`` | ``validation`` | Whether or not to perform `Content Validation`_ | Boolean | True | +| | | specific to groups in the given category. It is best to | | | +| | | pick a category that all clients have a group from. | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sshkeys`` | ``passphrase`` | Override the global Cfg passphrase with a specific | String | None | +| | | passphrase for encrypting created SSH private keys. | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sshkeys`` | ``category`` | Override the global Cfg category with a specific | String | None | +| | | category for created SSH keys. | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sslca`` | ``passphrase`` | Override the global Cfg passphrase with a specific | String | None | +| | | passphrase for encrypting created SSL keys. | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sslca`` | ``category`` | Override the global Cfg category with a specific | String | None | +| | | category for created SSL keys and certs. 
| | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sslca_*`` | ``config`` | Path to the openssl config for the CA | String | None | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sslca_*`` | ``passphrase`` | Passphrase for the CA private key | String | None | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sslca_*`` | ``chaincert`` | Path to the SSL chaining certificate for verification | String | None | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ +| ``sslca_*`` | ``root_ca`` | Whether or not ```` is a root CA (as | Boolean | False | +| | | opposed to an intermediate cert) | | | ++-------------+----------------+---------------------------------------------------------+-----------------------+------------+ + +See :ref:`server-encryption` for more details on encryption in Bcfg2 +in general. diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/decisions.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/decisions.txt --- bcfg2-1.3.5/doc/server/plugins/generators/decisions.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/decisions.txt 2017-01-10 19:18:17.000000000 +0000 @@ -29,18 +29,23 @@ is not used. See `Decision Mode`_ below. The Decisions plugin uses a directory in the Bcfg2 repository called -Decisions. Files in the Decisions subdirectory are named similarly to -files managed by Cfg and Probes, so you can use host- and -group-specific files and the like after their basename. File basenames -are either ``whitelist`` or ``blacklist``. These files have a simple -format; the following is an example. +Decisions, which may contain two files: ``whitelist.xml`` and +``blacklist.xml``. These files have a simple format: + +.. xml:type:: DecisionsType + :linktotype: + :noautodep: py:genshiElements + +For example: .. code-block:: xml - $ cat Decisions/whitelist + $ cat Decisions/whitelist.xml - + + + This example, included as a whitelist due to its name, enables all services, @@ -60,12 +65,6 @@ control these via their respective options (``-I`` or ``-n``, for example). -To add syntax highlighting to Decisions files in vim and emacs, you -can add comments such as this:: - - - - Decision Mode ============= diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/examples/genshi/ganglia.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/genshi/ganglia.txt --- bcfg2-1.3.5/doc/server/plugins/generators/examples/genshi/ganglia.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/genshi/ganglia.txt 2017-01-10 19:18:17.000000000 +0000 @@ -33,7 +33,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/examples/jinja2/extends.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/jinja2/extends.txt --- bcfg2-1.3.5/doc/server/plugins/generators/examples/jinja2/extends.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/jinja2/extends.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,62 @@ +.. -*- mode: rst -*- +.. 
vim: ft=rst + +=========================== + Extending Jinja2 Templates +=========================== + +Jinja2 templates can use the {% extends %} directive to inherit file +fragments which might be common to many configuration files. + +Use the "jinja2_include" suffix for file fragments you will extend. + +``/var/lib/bcfg2/Cfg/foo/common.jinja2_include`` + +.. code-block:: none + + [global] + setting1 = true + setting2 = false + {% block setting3 %}setting3 = "default value"{% endblock %} + + {% block section1 -%} + [section1] + setting4 = true + setting5 = false + {%- endblock %} + + {% block section2 -%} + [section2] + setting6 = true + setting7 = false + {%- endblock %} + +``/var/lib/bcfg2/Cfg/foo/foo.H_hostname.jinja2`` + +.. code-block:: none + + {% extends "common.jinja2_include" %} + {% block setting3 %}setting3 = "new value"{% endblock %} + {% block section1 -%} + [section1] + setting4 = false + setting5 = false + {%- endblock %} + +Output +====== + +.. code-block:: none + + [global] + setting1 = true + setting2 = false + setting3 = "new value" + + [section1] + setting4 = false + setting5 = false + + [section2] + setting6 = true + setting7 = false diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/examples/jinja2/include.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/jinja2/include.txt --- bcfg2-1.3.5/doc/server/plugins/generators/examples/jinja2/include.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/jinja2/include.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,55 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +=========================== + Including Jinja2 Templates +=========================== + +Jinja2 templates can use the {% include %} directive to include file +fragments which might be common to many configuration files. + +Use the "jinja2_include" suffix for file fragments you will include. + +``/var/lib/bcfg2/Cfg/foo/foo.jinja2`` + +.. code-block:: none + + [global] + setting1 = true + setting2 = false + + {% for x in metadata.groups %}{% include x + '.jinja2_include' ignore missing %} + {% endfor %} + +``/var/lib/bcfg2/Cfg/foo/group1.jinja2_include`` + +.. code-block:: none + + [group1] + setting3 = true + setting4 = false + +``/var/lib/bcfg2/Cfg/foo/group3.jinja2_include`` + +.. code-block:: none + + [group3] + setting7 = true + setting8 = false + +Output +====== + +.. code-block:: none + + [global] + setting1 = true + setting2 = false + + [group1] + setting3 = true + setting4 = false + + [group3] + setting7 = true + setting8 = false diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/examples/jinja2/simple.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/jinja2/simple.txt --- bcfg2-1.3.5/doc/server/plugins/generators/examples/jinja2/simple.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/examples/jinja2/simple.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,53 @@ +.. -*- mode: rst -*- + +========================= + Basic Jinja2 Templates +========================= + +This simple example demonstrates basic usage of Jinja2 templates. + +``/var/lib/bcfg2/Cfg/foo/foo.jinja2`` + +.. 
code-block:: none + + Hostname is {{ metadata.hostname }} + Filename is {{ name }} + Template is {{ source_path }} + Groups: + {% for group in metadata.groups -%} + * {{ group }} + {% endfor %} + Categories: + {% for category in metadata.categories -%} + * {{ category }} -- {{ metadata.categories[category] }} + {% endfor %} + + Probes: + {% for probe in metadata.Probes -%} + * {{ probe }} -- {{ metadata.Probes[probe] }} + {% endfor %} + +Output +====== + +.. code-block:: xml + + + Hostname is topaz.mcs.anl.gov + Filename is /foo + Template is /var/lib/bcfg2/Cfg/foo/foo.jinja2 + Groups: + * desktop + * mcs-base + * ypbound + * workstation + * xserver + * debian-sarge + * debian + * a + Categories: + * test -- a + + Probes: + * os -- debian + diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/hostbase.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/hostbase.txt --- bcfg2-1.3.5/doc/server/plugins/generators/hostbase.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/hostbase.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-generators-hostbase: - -======== -Hostbase -======== - -IP management system built on top of Bcfg2. It has four main parts: a -django data model, a web frontend, command-line utilities, and a Bcfg2 -plugin that generates dhcp, dns, and yp configuration files. - -Installation -============ - -Installation of Hostbase requires installation of a python module, -configuration of database (mysql or postgres), and configuration of an -Apache webserver with mod_python. Hostbase was developed using MySQL, -so this document is aimed at MySQL users. - -Prerequisites -------------- - -* `mysql`_ -* `python-mysqldb`_ -* `Django`_ - -.. _Django: http://www.djangoproject.com -.. _python-mysqldb: http://mysql-python.sourceforge.net/MySQLdb.html -.. _mysql: http://www.mysql.com/ - -Configure the database ----------------------- - -Create the hostbase database and a user. For MySQL users:: - - mysql> CREATE DATABASE hostbase - mysql> quit - - systemprompt#: mysql -u root hostbase - mysql> GRANT ALL PRIVILEGES ON *.* TO hostbaseuser@mycomputer.private.net IDENTIFIED - BY 'password' WITH GRANT OPTION; - mysql> quit - -As of Bcfg2 v0.8.7 configuration options for Hostbase have moved to -``/etc/bcfg2.conf``. There is an example bcfg2.conf with Hostbase -options located at ``bcfg2-tarball/examples/bcfg2.confHostbase``. -Edit the hostbase options to correspond to the database you've -initialized and copy the configuration to ``/etc/bcfg2.conf``. To -finish creating the database, from your ``path to -python/Bcfg2/Server/Hostbase`` directory, run ``python manage.py -syncdb`` to do all table creation. - -Configure the web interface ---------------------------- - -Now it's possible to explore the Hostbase web interface. For -curiosity, you can run Django's built-in development server to take a -peek. Do this by running ``python manage.py runserver -[servername:port]`` from your Hostbase directory. Django will -default to ``localhost:8000`` if no server or port is entered. Now -you can explore the web interface. Try adding a host and a zone. -You'll see that a ".rev" zone already exists. This is where -information for reverse files will go. - -For production, you'll want to have this configured for Apache with -mod_python. Here is an example of how to configure Hostbase as a -virtual host. - -.. 
code-block:: html - - - ServerAdmin systems@mcs.anl.gov - - DocumentRoot /var/www/hostbase/ - - AllowOverride None - - - # Possible values include: debug, info, notice, warn, error, crit, - # alert, emerg. - LogLevel warn - - ServerSignature Off - - # Stop TRACE/TRACK vulnerability - - RewriteEngine on - RewriteCond %{REQUEST_METHOD} ^(TRACE|TRACK) - RewriteRule .* - [F] - - - Redirect / https://hostbase.mcs.anl.gov/ - - - - ServerAdmin systems@mcs.anl.gov - - DocumentRoot /var/www/hostbase/ - - AllowOverride None - - - # Possible values include: debug, info, notice, warn, error, crit, - # alert, emerg. - LogLevel warn - - ServerSignature Off - - # Stop TRACE/TRACK vulnerability - - RewriteEngine on - RewriteCond %{REQUEST_METHOD} ^(TRACE|TRACK) - RewriteRule .* - [F] - - - SSLEngine On - SSLCertificateFile /etc/apache2/ssl/hostbase_server.crt - SSLCertificateKeyfile /etc/apache2/ssl/hostbase_server.key - - - SetHandler python-program - PythonHandler django.core.handlers.modpython - SetEnv DJANGO_SETTINGS_MODULE Bcfg2.Server.Hostbase.settings - PythonDebug On - - - SetHandler None - - - - -You'll need to copy the contents of ``Hostbase/media`` into -``/var/www/hostbase/site_media`` in this configuration to serve the -correct css files. - -Enable the Hostbase plugin --------------------------- - -Now that the database is accessible and there is some data in it, you can -enable the Hostbase plugin on your Bcfg2 server to start generating some -configuration files. All that needs to be done is to add ``Hostbase`` -to the end of the list of generators in your bcfg2.conf file. To see -what's being generated by Hostbase, fire up a Bcfg2 development server: -``bcfg2-info``. For more information on how to use the Bcfg2 development -server, type help at the prompt. For our purposes, type ``debug``. -This will bring you to an interactive python prompt where you can access -bcfg's core data. - -.. code-block:: python - - for each in bcore.plugins['Hostbase'].filedata: - print each - - -The above loop will print out the name of each file that was generated -by Hostbase. You can see the contents of any of these by typing ``print -bcore.plugins['Hostbase'].filedata[filename]``. - -Create a bundle ---------------- - -Bcfg2 needs a way to distribute the files generated by Hostbase. -We'll do this with a bundle. In bcfg's ``Bundler`` directory, touch -``hostbase.xml``. - -.. code-block:: xml - - - - - - - - - - - -The above example is a bundle that will deliver both dhcp and dns files. -This can be trivially split into separate bundles. It is planned that -Hostbase will eventually be able to generate the list of ``Paths`` -in its bundles automatically. - -Do a Hostbase push ------------------- - -You'll want to be able to trigger the Hostbase plugin to rebuild -it's config files and push them out when data has been modified -in the database. This can be done through and XMLRPC function -available from the Bcfg2 server. From a client that is configured -to receive one or more hostbase bundles, you'll need to first -edit your ``python/site-packages/Bcfg2/Client/Proxy.py`` file. -Add ``'Hostbase.rebuildState'`` to the list of methods in the Bcfg2 -client proxy object. The modified list is shown below: - -.. code-block:: python - - class bcfg2(ComponentProxy): - '''bcfg2 client code''' - name = 'bcfg2' - methods = ['AssertProfile', 'GetConfig', 'GetProbes', 'RecvProbeData', 'RecvStats', 'Hostbase.rebuildState'] - -Now copy the file ``hostbasepush.py`` from ``bcfg2/tools`` in the Bcfg2 -source to your machine. 
When this command is run as root, it triggers -the Hostbase to rebuild it's files, then runs the Bcfg2 client on your -local machine to grab the new configs. - -NIS Authentication -================== - -Django allows for custom authentication backends to its login procedure. -Hostbase has an NIS authentication backend that verifies a user to be -in the unix group allowed to modify Hostbase. - -To enable this feature: - -* first edit your ``Hostbase/settings.py`` file and uncomment - the line **Hostbase.backends.NISBackend** in the list of - *AUTHENTICATION_BACKENDS* -* enter the name of the unix group you want to give access to Hostbase - in the *AUTHORIZED_GROUP* variable -* in your ``Hostbase/hostbase/views.py`` file at the very bottom, - uncomment the block(s) of lines that give you the desired level - of access - -Hostbase will now direct the user to a login page if he or she is not -authorized to view a certain page. Users should log in with their -regular Unix username and password. diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/nagiosgen.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/nagiosgen.txt --- bcfg2-1.3.5/doc/server/plugins/generators/nagiosgen.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/nagiosgen.txt 2017-01-10 19:18:17.000000000 +0000 @@ -12,7 +12,7 @@ Update ``/etc/bcfg2.conf``, adding NagiosGen to plugins:: - plugins = Base,Bundler,Cfg,...,NagiosGen + plugins = Bundler,Cfg,...,NagiosGen Create the NagiosGen directory:: @@ -29,7 +29,6 @@ check_period 24x7 contact_groups admins event_handler_enabled 1 - failure_prediction_enabled 1 flap_detection_enabled 1 initial_state o max_check_attempts 10 @@ -124,21 +123,21 @@ .. code-block:: xml - + - + - + - + - + @@ -161,10 +160,6 @@ -Update nagios configuration file to use ``nagiosgen.cfg``:: - - cfg_file=/etc/nagios/nagiosgen.cfg - Note that some of these files are built on demand, each time a client in group "nagios-server" checks in with the Bcfg2 server. Local nagios instances can be configured to use the NagiosGen directory in the Bcfg2 @@ -199,7 +194,4 @@ ``NagiosGen/config.xml`` replaces the files ``Properties/NagiosGen.xml`` and ``NagiosGen/parents.xml`` in older versions of Bcfg2; your old configs can be migrated using the -``nagiosgen-convert.py`` tool. The plugin does contain a -backwards-compatibility layer for those older config files, but -``NagiosGen/config.xml`` must exist (even if empty) for the plugin to -function. +``nagiosgen-convert.py`` tool. diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/packages.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/packages.txt --- bcfg2-1.3.5/doc/server/plugins/generators/packages.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/packages.txt 2017-01-10 19:18:17.000000000 +0000 @@ -18,14 +18,10 @@ Limiting sources to groups ========================== -`sources.xml`_ processes ```` and ```` tags just like -Bundles. In addition to any groups or clients specified that way, -clients must be a member of the appropriate architecture group as -specified in a Source stanza. In total, in order for a source to be -associated with a client, the client must be in any explicit groups or -clients specified in `sources.xml`_, and any specified architecture -groups. If `"Magic Groups"`_ are enabled, then the client must be a -member of a matching magic group as well. 
+``Packages/sources.xml`` processes ```` and ```` tags +just like Bundles. In addition to any groups or clients specified that +way, clients must be a member of the appropriate architecture group as +specified in a Source stanza. Memberships in architecture groups is needed so that Packages can map software sources to clients. There is no other way to handle this than @@ -36,62 +32,6 @@ above). Packages and dependencies are resolved from all applicable sources. -.. note:: - - To recap, a client needs to be a member of the **Architecture** - group and any other groups defined in your - `sources.xml`_ file in order for the client to be - associated to the proper sources. If you are using - :ref:`server-plugins-generators-packages-magic-groups`, then a - client must also be a member of the appropriate OS group. - -.. _server-plugins-generators-packages-magic-groups: - -"Magic Groups" -============== - -.. deprecated:: 1.3.0 - -Packages has the ability to use a feature known as "magic groups"; it -is the only plugin to use that feature. Most plugins operate based on -client group memberships, without any concern for the particular names -chosen for groups by the user. The Packages plugin is the sole -exception to this rule. Packages needs to "know" two different sorts -of facts about clients. The first is the basic OS/distro of the -client, enabling classes of sources. The second is the architecture of -the client, enabling sources for a given architecture. In addition to -these magic groups, each source may also specify non-magic groups to -limit the source's applicability to group member clients. - -+--------+----------+--------------+ -| Source | OS Group | Architecture | -+========+==========+==============+ -| Apt | debian | i386 | -+--------+----------+--------------+ -| Apt | ubuntu | amd64 | -+--------+----------+--------------+ -| Apt | nexenta | | -+--------+----------+--------------+ -| Apt | apt | | -+--------+----------+--------------+ -| Yum | redhat | i386 | -+--------+----------+--------------+ -| Yum | centos | x86_64 | -+--------+----------+--------------+ -| Yum | fedora | | -+--------+----------+--------------+ -| Yum | yum | | -+--------+----------+--------------+ - -Magic OS groups are disabled by default in Bcfg2 1.3 and greater. If -you require magic groups, you can enable them by setting -``magic_groups`` to ``1`` in the ``[packages]`` section of -``bcfg2.conf``. - -Magic groups will be removed in a future release. - -Magic architecture groups cannot be disabled. - Setup ===== @@ -102,14 +42,13 @@ software repositories should be used, and which clients are eligible to use each one. #. Ensure that clients are members of the proper groups. Each client - should be a member of all of the groups listed in the `sources.xml` - (like ubuntu-intrepid or centos-5.2 in the following examples), one - of the architecture groups listed in the source configuration - (i386, amd64 or x86_64 in the following examples), and one of the - magic groups listed above, if magic groups are enabled. '''Failure - to do this will result in the source either not applying to the - client, or only architecture independent packages being made - available to the client.''' + should be a member of all of the groups listed in the + ``sources.xml`` (like ubuntu-intrepid or centos-5.2 in the + following examples), and one of the architecture groups listed in + the source configuration (i386, amd64 or x86_64 in the following + examples). 
'''Failure to do this will result in the source either + not applying to the client, or only architecture independent + packages being made available to the client.''' #. Add Package entries to bundles. #. Sit back and relax, as dependencies are resolved, and automatically added to client configurations. @@ -122,6 +61,7 @@ Bundles. The primary element in ``sources.xml`` is the Source tag: .. xml:element:: Source + :noautodep: py:genshiElements Handling GPG Keys ----------------- @@ -198,9 +138,7 @@ and the client metadata instance is passed into Packages' resolver. This process determines a superset of packages that will fully satisfy dependencies of all package entries included in structures, and reports -any prerequisites that cannot be satisfied. This facility should largely -remove the need to use the :ref:`Base ` -plugin. +any prerequisites that cannot be satisfied. Disabling dependency resolution ------------------------------- @@ -275,22 +213,18 @@ Packages plugin to add recommended packages by adding the :xml:attribute:`SourceType:recommended` attribute, e.g.: - .. code-block:: xml + .. code-block:: none - + - .. warning:: You must regenerate the Packages cache when adding or - removing the recommended attribute (``bcfg2-admin xcmd - Packages.Refresh``). - .. [#f1] Bcfg2 will by default add **Essential** packages to the client specification. You can disable this behavior by setting the :xml:attribute:`SourceType:essential` attribute to *false*: - .. code-block:: xml + .. code-block:: none - + Yum sources can be similarly specified: @@ -409,9 +343,85 @@ Availability ============ -Support for clients using yum and apt is currently available. Support for +Support for the following clients is currently available. Support for other package managers (Portage, Zypper, IPS, etc) remain to be added. +apt +--- + +All dpkg-based clients (for example Debian, Ubuntu or Nexenta) can be +handled with the apt module: + +.. code-block:: xml + + + main + universe + i386 + amd64 + + + +pac +--- + +For Arch Linux or Parabola GNU/Linux-libre, you can use the pac module +for packages. You do not need to supply a version attribute, as the mirrors +are rolling release and do not supply different versions. + +.. code-block:: xml + + + core + extra + community + i686 + x86_64 + + + +pkgng +----- + +Support for the Next Generation package management tool for FreeBSD +is provided by the pkgng module. It downloads the packagesite file from the mirror +and parses the dependencies out of it. It currently does not use the +DNS SRV record lookup mechanism to get the correct mirror and does +not verify the signature inside the packagesite file. + +.. code-block:: xml + + + latest + x86:64 + x86:32 + + + +yum +--- + +RPM-based clients (for example RedHat, CentOS or Fedora) can be handled +with the yum module: + +.. code-block:: xml + + + os + updates + extras + i386 + x86_64 + + + Package Checking and Verification ================================= @@ -451,7 +461,7 @@ .. code-block:: xml - + @@ -473,6 +483,59 @@ .. _native-yum-libraries: 
You can use either the short +group ID or the long group name: + +.. code-block:: xml + + + + +By default, only those packages considered the "default" packages in a +group will be installed. You can change this behavior using the +:xml:attribute:`PackageStructure:type` attribute: + +.. code-block:: xml + + + + +Valid values of "type" are: + +* ``mandatory``: Only install mandatory packages in the group. +* ``default``: Install default packages from the group (the default). +* ``optional`` or ``all``: Install all packages in the group, + including mandatory, default, and optional packages. + +See :xml:type:`PackageStructure` for details. + +You can view the packages in a group by category with the ``yum +groupinfo`` command. More information about the different levels can +be found at +http://fedoraproject.org/wiki/How_to_use_and_edit_comps.xml_for_package_groups#Installation + Using Native Yum Libraries ========================== @@ -536,43 +599,6 @@ * ``reposdir`` is set to ``/dev/null`` to prevent the server's Yum configuration from being read; do not change this. -Package Groups --------------- - -Yum package groups are supported by both the native Yum libraries and -Bcfg2's internal dependency resolver. To include a package group, use -the :xml:attribute:`PackageStructure:group` attribute of the -:xml:element:`Package` tag. You can use either the short group ID or -the long group name: - -.. code-block:: xml - - - - -By default, only those packages considered the "default" packages in a -group will be installed. You can change this behavior using the -:xml:attribute:`PackageStructure:type` attribute: - -.. code-block:: xml - - - - -Valid values of "type" are: - -* ``mandatory``: Only install mandatory packages in the group. -* ``default``: Install default packages from the group (the default). -* ``optional`` or ``all``: Install all packages in the group, - including mandatory, default, and optional packages. - -See :xml:type:`PackageStructure` for details. - -You can view the packages in a group by category with the ``yum -groupinfo`` command. More information about the different levels can -be found at -http://fedoraproject.org/wiki/How_to_use_and_edit_comps.xml_for_package_groups#Installation - Abstract Package Tags --------------------- @@ -707,6 +733,9 @@ +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | Name | Description | Values | Default | +=============+======================================================+==========+===================================================================+ +| backends | List of backends that should be loaded for the | List | Yum,Apt,Pac,Pkgng | +| | dependency resolution. | | | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | resolver | Enable dependency resolution | Boolean | True | +-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ | metadata | Enable metadata processing. 
Disabling ``metadata`` | Boolean | True | diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/pkgmgr.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/pkgmgr.txt --- bcfg2-1.3.5/doc/server/plugins/generators/pkgmgr.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/pkgmgr.txt 2017-01-10 19:18:17.000000000 +0000 @@ -10,10 +10,10 @@ to a package specification that the client can use to detect, verify and install the specified package. -For a package specification to be included in the Literal configuration -the name attribute from an Abstract Package Tag (from Base or Bundler) -must match the name attribute of a Package tag in Pkgmgr, along with -the appropriate group associations of course. +For a package specification to be included in the Literal +configuration the name attribute from an abstract Package tag (from +Bundler) must match the name attribute of a Package tag in Pkgmgr, +along with the appropriate group associations of course. Each file in the Pkgmgr directory has a priority. This allows the same package to be served by multiple files. The priorities can be diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/rules.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/rules.txt --- bcfg2-1.3.5/doc/server/plugins/generators/rules.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/rules.txt 2017-01-10 19:18:17.000000000 +0000 @@ -20,32 +20,14 @@ to literal configuration entries suitable for the client drivers to consume. -For an entity specification to be included in the Literal configuration -the name attribute from an Abstract Entity Tag (from Base or Bundler) -must match the name attribute of an Entity tag in Rules, along with the -appropriate group associations of course. +For an entity specification to be included in the Literal +configuration the name attribute from an abstract entity tag (from +Bundler) must match the name attribute of an entity tag in Rules, +along with the appropriate group associations of course. Each file in the Rules directory has a priority. This allows the same Entities to be served by multiple files. The priorities can be used to -break ties in the case that multiple files serve data for the same Entity. - - -Usage of Groups in Rules -======================== - -Groups are used by the Rules plugin, along with host metadata, for -selecting the Configuration Entity entries to include in the clients -literal configuration. They can be thought of as:: - - if client is a member of group1 then - assign to literal config - -Nested groups are conjunctive (logical and).:: - - if client is a member of group1 and group2 then - assign to literal config - -Group membership may be negated. +break ties in the case that multiple files serve data for the same entity. Tag Attributes in Rules ======================= @@ -266,10 +248,13 @@ :onlyattrs: to :requiredattrs: to +.. _server-plugins-generators-rules-vcs: + vcs ^^^ -Check out the specified VCS repository to the given path. +Check out the specified VCS repository to the given path. See +:ref:`client-tools-vcs` for more details. .. xml:type:: PathType :nochildren: @@ -515,8 +500,8 @@ expressions. This entails a small performance and memory usage penalty. 
To do so, add the following setting to ``bcfg2.conf``:: - [rules] - regex = yes + [rules] + regex = yes With regular expressions enabled, you can use a regex in the ``name`` attribute to match multiple abstract configuration entries. @@ -527,3 +512,23 @@ Note that only one Rule can apply to any abstract entry, so you cannot specify multiple regexes to match the same rule. + +Replacing the name of the Entry in Attributes +============================================= + +If you are using regular expressions to match the abstract configuration +entries, you may need the concrete name of the entry in some attributes. +To use this feature, you have to enable it. It is only useful, if used +together with regex matching. :: + + [rules] + regex = yes + replace_name = yes + +You now can write something like that in your xml file: + +.. code-block:: xml + + + +``%{name}`` will be correctly replaced with the username for each POSIXUser. diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/semodules.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/semodules.txt --- bcfg2-1.3.5/doc/server/plugins/generators/semodules.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/semodules.txt 2017-01-10 19:18:17.000000000 +0000 @@ -41,7 +41,7 @@ .. code-block:: xml - + @@ -50,7 +50,7 @@ .. note:: If you use a ``BoundSEModule`` tag, you must *not* include the - ``.pp`` extension. This is not recommend, though. + ``.pp`` extension. This is not recommended, though. You can also install a disabled module: diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/sshbase.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/sshbase.txt --- bcfg2-1.3.5/doc/server/plugins/generators/sshbase.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/sshbase.txt 2017-01-10 19:18:17.000000000 +0000 @@ -14,8 +14,8 @@ It has two functions: -* Generating new ssh keys -- When a client requests a ecdsa, dsa, rsa, - or v1 key, and there is no existing key in the repository, one is +* Generating new ssh keys -- When a client requests a key (v1, rsa, + ecdsa, etc.), and there is no existing key in the repository, one is generated. 
* Maintaining the ``ssh_known_hosts`` file -- all current known public @@ -73,6 +73,7 @@ * RSA2 (``ssh_host_rsa_key``, ``ssh_host_rsa_key.pub``) * DSA (``ssh_host_dsa_key``, ``ssh_host_dsa_key.pub``) * ECDSA (``ssh_host_ecdsa_key``, ``ssh_host_ecdsa_key.pub``) +* Ed25519 (``ssh_host_ed25519_key``, ``ssh_host_ed25519_key.pub``) Group-specific keys =================== @@ -143,25 +144,39 @@ Default permissions are as follows: -+----------------------------------+-------+-------+------+-----------+----------+----------+ -| File | owner | group | mode | sensitive | paranoid | encoding | -+==================================+=======+=======+======+===========+==========+==========+ -| ssh_known_hosts | root | root | 0644 | false | false | None | -+----------------------------------+-------+-------+------+-----------+----------+----------+ -| ssh_host_key | root | root | 0600 | false | false | base64 | -+----------------------------------+-------+-------+------+-----------+----------+----------+ -| ssh_host_key.pub | root | root | 0644 | false | false | base64 | -+----------------------------------+-------+-------+------+-----------+----------+----------+ -| ssh_host_[rsa|dsa|ecdsa]_key | root | root | 0600 | false | false | None | -+----------------------------------+-------+-------+------+-----------+----------+----------+ -| ssh_host_[rsa|dsa|ecdsa]_key.pub | root | root | 0644 | false | false | None | -+----------------------------------+-------+-------+------+-----------+----------+----------+ ++------------------------------------------+-------+-------+------+-----------+----------+----------+ +| File | owner | group | mode | sensitive | paranoid | encoding | ++==========================================+=======+=======+======+===========+==========+==========+ +| ssh_known_hosts | root | root | 0644 | false | false | None | ++------------------------------------------+-------+-------+------+-----------+----------+----------+ +| ssh_host_key | root | root | 0600 | false | false | base64 | ++------------------------------------------+-------+-------+------+-----------+----------+----------+ +| ssh_host_key.pub | root | root | 0644 | false | false | base64 | ++------------------------------------------+-------+-------+------+-----------+----------+----------+ +| ssh_host_[rsa|dsa|ecdsa|ed25519]_key | root | root | 0600 | false | false | None | ++------------------------------------------+-------+-------+------+-----------+----------+----------+ +| ssh_host_[rsa|dsa|ecdsa|ed25519]_key.pub | root | root | 0644 | false | false | None | ++------------------------------------------+-------+-------+------+-----------+----------+----------+ Note that the ``sensitive`` attribute is false, even for private keys, in order to permit :ref:`pulling with bcfg2-admin `. You should almost certainly set ``sensitive`` to "true" in ``info.xml``. + +.. _server-plugins-generators-sshbase-encryption: + +Encryption +========== + +SSHbase can optionally encrypt the private keys that it generates. To +enable this feature, set the ``passphrase`` option in the +``[sshbase]`` section of ``bcfg2.conf`` to the name of the passphrase +that should be used to encrypt all SSH keys. (The passphrases are +enumerated in the ``[encryption]`` section.) See +:ref:`server-encryption` for more details on Bcfg2 encryption in +general. 
+ Blog post ========= diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/sslca.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/sslca.txt --- bcfg2-1.3.5/doc/server/plugins/generators/sslca.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/sslca.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,361 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-generators-sslca: - -===== -SSLCA -===== - -SSLCA is a generator plugin designed to handle creation of SSL private -keys and certificates on request. - -Borrowing ideas from :ref:`server-plugins-generators-cfg-genshi` and -the :ref:`server-plugins-generators-sshbase` plugin, SSLCA automates -the generation of SSL certificates by allowing you to specify key and -certificate definitions. Then, when a client requests a Path that -contains such a definition within the SSLCA repository, the matching -key/cert is generated, and stored in a hostfile in the repo so that -subsequent requests do not result in repeated key/cert recreation. In -the event that a new key or cert is needed, the offending hostfile can -simply be removed from the repository, and the next time that host -checks in, a new file will be created. If that file happens to be the -key, any dependent certificates will also be regenerated. - -.. _getting-started: - -Getting started -=============== - -In order to use SSLCA, you must first have at least one CA configured -on your system. For details on setting up your own OpenSSL based CA, -please see http://www.openssl.org/docs/apps/ca.html for details of the -suggested directory layout and configuration directives. - -For SSLCA to work, the openssl.cnf (or other configuration file) for -that CA must contain full (not relative) paths. - -#. Add SSLCA to the **plugins** line in ``/etc/bcfg2.conf`` and - restart the server -- This enabled the SSLCA plugin on the Bcfg2 - server. - -#. Add a section to your ``/etc/bcfg2.conf`` called ``sslca_foo``, - replacing foo with the name you wish to give your CA so you can - reference it in certificate definitions. - -#. Under that section, add an entry for ``config`` that gives the - location of the openssl configuration file for your CA. - -#. If necessary, add an entry for ``passphrase`` containing the - passphrase for the CA's private key. We store this in - ``/etc/bcfg2.conf`` as the permissions on that file should have it - only readable by the bcfg2 user. If no passphrase is entry exists, - it is assumed that the private key is stored unencrypted. - -#. Optionally, Add an entry ``chaincert`` that points to the location - of your ssl chaining certificate. This is used when preexisting - certifcate hostfiles are found, so that they can be validated and - only regenerated if they no longer meet the specification. If - you're using a self signing CA this would be the CA cert that you - generated. If the chain cert is a root CA cert (e.g., if it is a - self-signing CA), also add an entry ``root_ca = true``. If - ``chaincert`` is omitted, certificate verification will not be - performed. - -#. Once all this is done, you should have a section in your - ``/etc/bcfg2.conf`` that looks similar to the following:: - - [sslca_default] - config = /etc/pki/CA/openssl.cnf - passphrase = youReallyThinkIdShareThis? - chaincert = /etc/pki/CA/chaincert.crt - root_ca = true - -#. You are now ready to create key and certificate definitions. 
For - this example we'll assume you've added Path entries for the key, - ``/etc/pki/tls/private/localhost.key``, and the certificate, - ``/etc/pki/tls/certs/localhost.crt`` to a bundle or base. - -#. Defining a key or certificate is similar to defining a Cfg file. - Under your Bcfg2's ``SSLCA/`` directory, create the directory - structure to match the path to your key. In this case this would be - something like - ``/var/lib/bcfg2/SSLCA/etc/pki/tls/private/localhost.key``. - -#. Within that directory, create a `key.xml`_ file containing the - following: - - .. code-block:: xml - - - - - -#. This will cause the generation of an 2048 bit RSA key when a client - requests that Path. Alternatively you can specify ``dsa`` as the - keytype, or a different number of bits. - -#. Similarly, create the matching directory structure for the - certificate path, and a `cert.xml`_ containing the following: - - .. code-block:: xml - - - - - -#. When a client requests the cert path, a certificate will be - generated using the key hostfile at the specified key location, - using the CA matching the ca attribute. ie. ca="default" will match - [sslca_default] in your ``/etc/bcfg2.conf`` - -.. _sslca-configuration: - -Configuration -============= - -bcfg2.conf ----------- - -``bcfg2.conf`` contains miscellaneous configuration options for the -SSLCA plugin. These are described in some detail above in -`getting-started`, but are also enumerated here as a reference. Any -booleans in the config file accept the values "1", "yes", "true", and -"on" for True, and "0", "no", "false", and "off" for False. - -Each directive below should appear at most once in each -``[sslca_]`` section. The following directives are understood: - -+--------------+------------------------------------------+---------+---------+ -| Name | Description | Values | Default | -+==============+==========================================+=========+=========+ -| config | Path to the openssl config for the CA | String | None | -+--------------+------------------------------------------+---------+---------+ -| passphrase | Passphrase for the CA private key | String | None | -+--------------+------------------------------------------+---------+---------+ -| chaincert | Path to the SSL chaining certificate for | String | None | -| | verification | | | -+--------------+------------------------------------------+---------+---------+ -| root_ca | Whether or not ```` is a root | Boolean | false | -| | CA (as opposed to an intermediate cert) | | | -+--------------+------------------------------------------+---------+---------+ - -Only ``config`` is required. - -cert.xml --------- - -.. xml:schema:: sslca-cert.xsd - :linktotype: - :inlinetypes: CertType - -Example -^^^^^^^ - -.. code-block:: xml - - - test.example.com - - - - - - - - -key.xml -------- - -.. xml:schema:: sslca-key.xsd - :linktotype: - :inlinetypes: KeyType - -Example -^^^^^^^ - -.. code-block:: xml - - - - - - - - - - -Automated Bcfg2 SSL Authentication -================================== - -This section describes one possible scenario for automating ssl -certificate generation and distribution for bcfg2 client/server -communication using SSLCA. The process involves configuring a -certificate authority (CA), generating the CA cert and key pair, -configuring the bcfg2 SSLCA plugin and a Bundle to use the SSLCA -generated certs to authenticate the bcfg2 client and server. 
- -OpenSSL CA ----------- - -If you already have a SSL CA available you can skip this section, -otherwise you can easily build one on the server using openssl. The -paths should be adjusted to suite your preferences. - -#. Prepare the directories and files:: - - mkdir -p /etc/pki/CA/newcerts - mkdir /etc/pki/CA/crl - echo '01' > /etc/pki/CA/serial - touch /etc/pki/CA/index.txt - touch /etc/pki/CA/crlnumber - -#. Edit the ``openssl.cnf`` config file, and in the **[ CA_default ]** - section adjust the following parameters:: - - dir = /etc/pki # Where everything is kept - certs = /etc/pki/CA/certs # Where the issued certs are kept - database = /etc/pki/CA/index.txt # database index file. - new_certs_dir = /etc/pki/CA/newcerts # default place for new certs. - certificate = /etc/pki/CA/certs/bcfg2ca.crt # The CA certificate - serial = /etc/pki/CA/serial # The current serial number - crl_dir = /etc/pki/CA/crl # Where the issued crl are kept - crlnumber = /etc/pki/CA/crlnumber # the current crl number - crl = /etc/pki/CA/crl.pem # The current CRL - private_key = /etc/pki/CA/private/bcfg2ca.key # The private key - -#. Create the CA root certificate and key pair. You'll be asked to - supply a passphrase, and some organizational info. The most - important bit is **Common Name** which you should set to be the - hostname of your bcfg2 server that your clients will see when doing - a reverse DNS query on it's ip address.:: - - openssl req -new -x509 -extensions v3_ca -keyout bcfg2ca.key \ - -out bcfg2ca.crt -days 3650 - -#. Move the generated cert and key to the locations specified in - ``openssl.cnf``:: - - mv bcfg2ca.key /etc/pki/CA/private/ - mv bcfg2ca.crt /etc/pki/CA/certs/ - -Your self-signing CA is now ready to use. - -Bcfg2 ------ - -SSLCA -^^^^^ - -The SSLCA plugin was not designed specifically to manage bcfg2 -client/server communication though it is certainly able to provide -certificate generation and management services for that -purpose. You'll need to configure the **SSLCA** plugin to serve the -key, and certificate paths that we will define later in our client's -``bcfg2.conf`` file. - -The rest of these instructions will assume that you've configured the -**SSLCA** plugin as described above and that the files -``SSLCA/etc/pki/tls/certs/bcfg2client.crt/cert.xml`` and -``SSLCA/etc/pki/tls/private/bcfg2client.key/key.xml`` represent the -cert and key paths you want generated for SSL auth. - -Client Bundle -^^^^^^^^^^^^^ - -To automate the process of generating and distributing certs to the -clients we need define at least the Cert and Key paths served by the -SSLCA plugin, as well as the ca certificate path in a Bundle. For -example: - -.. code-block:: xml - - - - - -Here's a more complete example bcfg2-client bundle: - -.. code-block:: xml - - - - - - - - - - - - - - - - - - - - -In the above example we told Bcfg2 that it also needs to serve -``/etc/bcfg2.conf``. This is optional but convenient. - -The ``bcfg2.conf`` client config needs at least 5 parameters set for -SSL auth. - -#. ``key`` : This is the host specific key that SSLCA will generate. -#. ``certificate`` : This is the host specific cert that SSLCA will - generate. -#. ``ca`` : This is a copy of your CA certificate. Not generated by - SSLCA. -#. ``user`` : Usually set to fqdn of client. This *shouldn't* be - required but is as of 1.3.0. See: - http://trac.mcs.anl.gov/projects/bcfg2/ticket/1019 -#. ``password`` : Set to arbitrary string when using certificate - auth. This also *shouldn't* be required. 
See: - http://trac.mcs.anl.gov/projects/bcfg2/ticket/1019 - -Here's what a functional **[communication]** section in a -``bcfg2.conf`` genshi template for clients might look like.:: - - [communication] - protocol = xmlrpc/ssl - {% if metadata.uuid != None %}\ - user = ${metadata.uuid} - {% end %}\ - password = DUMMYPASSWORDFORCERTAUTH - {% choose %}\ - {% when 'rpm' in metadata.groups %}\ - certificate = /etc/pki/tls/certs/bcfg2client.crt - key = /etc/pki/tls/private/bcfg2client.key - ca = /etc/pki/tls/certs/bcfg2ca.crt - {% end %}\ - {% when 'deb' in metadata.groups %}\ - certificate = /etc/ssl/certs/bcfg2client.crt - key = /etc/ssl/private/bcfg2client.key - ca = /etc/ssl/certs/bcfg2ca.crt - {% end %}\ - {% end %}\ - -As a client will not be able to authenticate with certificates it does -not yet possess we need to overcome the chicken and egg scenario the -first time we try to connect such a client to the server. We can do so -using password based auth to boot strap the client manually specifying -all the relevant auth parameters like so:: - - bcfg2 -qv -S https://fqdn.of.bcfg2-server:6789 -u fqdn.of.client \ - -x SUPER_SECRET_PASSWORD - -If all goes well the client should recieve a freshly generated key and -cert and you should be able to run ``bcfg2`` again without specifying -the connection parameters. - -If you do run into problems you may want to review -:ref:`appendix-guides-authentication`. - -TODO -==== - -#. Add generation of pkcs12 format certs diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/tcheetah.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/tcheetah.txt --- bcfg2-1.3.5/doc/server/plugins/generators/tcheetah.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/tcheetah.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,197 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-generators-tcheetah: - -======== -TCheetah -======== - -.. warning:: - - TCheetah is deprecated. You should instead use - :ref:`server-plugins-generators-cfg-cheetah` in the Cfg plugin. - -This document reflects the ``TCheetah`` plugin. - -The ``TCheetah`` plugin allows you to use the `cheetah templating system -`_ to create files, instead of the -various diff-based methods offered by the ``Cfg`` plugin. It also allows -you to include the results of probes executed on the client in the -created files. - -To begin, you will need to download and install the Cheetah templating -engine from http://www.cheetahtemplate.org/. Once it is installed, -you can enable it by adding ``TCheetah`` to the ``plugins`` line in -``/etc/bcfg2.conf`` on your Bcfg server. For example:: - - plugins = Base,Bundler,Cfg,...,TCheetah - -The ``TCheetah`` plugin makes use of a ``Cfg``-like directory structure -located in in a ``TCheetah`` subdirectory of your repository, usually -``/var/lib/bcfg2/TCheetah``. Each file has a directory containing two -files, ``template`` and ``info``. The template is a standard Cheetah -template with two additions: - -* `self.metadata` is the client's :ref:`metadata ` -* `self.metadata.Properties.xdata` is an xml document of unstructured data - -The ``info`` file is formatted like ``:info`` files from Cfg. - -Mostly, people will want to use client metadata. - -File permissions -================ - -File permissions for entries handled by TCheetah are controlled via the -use of :ref:`server-info` files. Note that you **cannot** use both a -Permissions entry and a Path entry to handle the same file. 
- -self.metadata variables -======================= - -self.metadata is an instance of the class ClientMetadata and documented -:ref:`here `. - -self.metadata.Properties.xdata -============================== - -.. note:: - - If you want to use Properties, you will need to enable the - :ref:`server-plugins-connectors-properties` plugin in - ``/etc/bcfg2.conf``. - -Properties.xdata is a python `ElementTree `_ -object, loaded from the data in ``/var/lib/bcfg2/Properties/.xml``. That file should have a ``Properties`` node at its root. - -Example ``Properties/example.xml``: - -.. code-block:: xml - - - - - /dev/sda - - - - -You may use any of the ElementTree methods to access data in your -template. Several examples follow, each producing an identical result -on the host 'www.example.com':: - - $self.metadata.Properties['example.xml'].xdata.find('host').find('www.example.com').find('rootdev').text - $self.metadata.Properties['example.xml'].xdata.find('host').find($self.metadata.hostname).find('rootdev').text - ${self.metadata.Properties['example.xml'].xdata.xpath('host/www.example.com/rootdev')[0].text} - ${self.metadata.Properties['example.xml'].xdata.xpath('host/' + self.metadata.hostname + '/rootdev')[0].text} - #set $path = 'host/' + $self.metadata.hostname + '/rootdev' - ${self.metadata.Properties['example.xml'].xdata.xpath($path)[0].text} - ${self.metadata.Properties['example.xml'].xdata.xpath(path)[0].text} - -Other Variables -=============== - -* **Template.searchList(self)[1]['path']** is the Path name specified in a Bundle -* **Template.searchList(self)[1]['source_path']** is the path to the TCheetah template on the Bcfg2 server - -Simple Example -============== - -TCheetah works similar to Cfg in that you define all literal information -about a particular file in a directory rooted at TCheetah/path_to_file. -The actual file contents are placed in a file named `template` in that -directory. Below is a simple example a file ``/foo``. - -``/var/lib/bcfg2/TCheetah/foo/template`` - -.. code-block:: none - - > buildfile /foo - Hostname is $self.metadata.hostname - Filename is $Template.searchList(self)[1]['path'] - Template is $Template.searchList(self)[1]['source_path'] - Groups: - #for $group in $self.metadata.groups: - * $group - #end for - Categories: - #for $category in $self.metadata.categories: - * $category -- $self.metadata.categories[$category] - #end for - - Probes: - #for $probe in $self.metadata.Probes: - * $probe -- $self.metadata.Probes[$probe] - #end for - -``/var/lib/bcfg2/TCheetah/foo/info`` - -.. code-block:: none - - mode: 624 - -Output ------- - -The following output can be generated with bcfg2-info. Note that probe -information is not persistent, hence, it only works when clients directly -query the server. For this reason, bcfg2-info output doesn't reflect -current client probe state. - -.. code-block:: xml - - - Hostname is topaz.mcs.anl.gov - Filename is /foo - Template is /var/lib/bcfg2/TCheetah/foo/template - Groups: - * desktop - * mcs-base - * ypbound - * workstation - * xserver - * debian-sarge - * debian - * a - Categories: - * test -- a - - Probes: - - -Example: Replace the crontab plugin -=================================== - -In many cases you can use the TCheetah plugin to avoid writing custom -plugins in Python. This example randomizes the time of cron.daily -execution with a stable result. 
Cron.daily is run at a consistent, -randomized time between midnight and 7am.:: - - #import random - #silent random.seed($self.metadata.hostname) - - # /etc/crontab: system-wide crontab - # Unlike any other crontab you don't have to run the `crontab` - # command to install the new version when you edit this file. - # This file also has a username field, that none of the other crontabs do. - - SHELL=/bin/sh - PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin://bin - - # m h dom mon dow user command - 17 * * * * root run-parts --report /etc/cron.hourly - $random.randrange(0,59) $random.randrange(0,6) * * * root test -x /usr/sbin/anacron || run-parts --report /etc/cron.daily - 47 6 * * 7 root test -x /usr/sbin/anacron || run-parts --report /etc/cron.weekly - 52 6 1 * * root test -x /usr/sbin/anacron || run-parts --report /etc/cron.monthly. - -.. note:: Comments and Cheetah - As Cheetah processes your templates it will consider hash "#" style - comments to be actual comments in the template and will strip them - from the final config file. If you would like to preserve the comment - in the final config file you need to escape the hash character '\#' - which will tell Cheetah (and Python) that you do in fact want the - comment to appear in the final config file.:: - - # This is a comment in my template which will be stripped when it's processed through Cheetah - \# This comment will appear in the generated config file. diff -Nru bcfg2-1.3.5/doc/server/plugins/generators/tgenshi.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/tgenshi.txt --- bcfg2-1.3.5/doc/server/plugins/generators/tgenshi.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/generators/tgenshi.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-generators-tgenshi-index: - -======= -TGenshi -======= - -.. warning:: - - The TGenshi plugin is deprecated. You should instead use - :ref:`server-plugins-generators-cfg-genshi` in the Cfg plugin. - -This page documents the TGenshi plugin. This plugin works with version -0.4 and newer of the genshi library. - -The TGenshi plugin allows you to use the `Genshi -`_ templating system to create files, -instead of the various diff-based methods offered by the Cfg -plugin. It also allows you to include the results of probes executed -on the client in the created files. - -To begin, you will need to download and install the Genshi templating engine. - -To install on CentOS or RHEL, run:: - - sudo yum install python-genshi - -Once it is installed, you can enable it by adding ``TGenshi`` to the -generators line in ``/etc/bcfg2.conf`` on your Bcfg server. For example:: - - plugins = Base,Bundler,Cfg,...,TGenshi - -The TGenshi plugin makes use of a Cfg-like directory structure -located in in a TGenshi subdirectory of your repository, usually -``/var/lib/bcfg2/TGenshi``. Each file has a directory containing two file -types, template and info. Templates are named according to the genshi -format used; template.txt uses the genshi text format, and template.xml -uses the XML format. - -If used with Genshi 0.5 or later the plugin also supports the `new -style -`_ -text template format for files named template.newtxt. One of the -advantages of the new format is that it does not use # as a command -delimiter, making it easier to utilize for configuration files that -use # as a comment character. - -Only one template format may be used per file served. 
Info files are -identical to those used in ``Cfg``, and ``info.xml`` files are -supported. - -Inside of templates -=================== - -* **metadata** is the client's :ref:`metadata - ` -* **metadata.Properties** is an xml document of unstructured data (only - available when used in conjunction with the - :ref:`server-plugins-connectors-properties` plugin) -* **name** is the path name specified in bcfg -* **path** is the path to the TGenshi template. It starts with a - leading slash, and is relative to the Bcfg2 specification root. - E.g., ``/Cfg/etc/foo.conf/foo.conf.genshi`` or - ``/TGenshi/etc/foo.conf/template.newtxt.H_foo.example.com`` - -See the genshi `documentation -`_ for examples of -Genshi syntax. - -Examples: Old Genshi Syntax ---------------------------- - -Genshi's web pages recommend against using this syntax, as it may -disappear from future releases. - -Group Negation -^^^^^^^^^^^^^^ - -Templates are also useful for cases where more sophisticated boolean -operations than those supported by Cfg are needed. For example, the -template:: - - #if "ypbound" in metadata.groups and "workstation" in metadata.groups - client is ypbound workstation - #end - #if "ubuntu" not in metadata.groups and "desktop" in metadata.groups - client is a desktop, but not an ubuntu desktop - #end - -Produces: - -.. code-block:: xml - - client is ypbound workstation - client is a desktop, but not an ubuntu desktop - - -This flexibility provides the ability to build much more compact and -succinct definitions of configuration contents than Cfg can. - -Troubleshooting -=============== - -When developing a template, you can see what the template would -generate on a client with :ref:`bcfg2-info `:: - - bcfg2-info buildfile - -E.g.:: - - bcfg2-info buildfile /etc/foo.conf foo.example.com - -To generate a file with an altsrc attribute, you can run:: - - bcfg2-info buildfile /etc/foo/foo.conf --altsrc=/etc/foo.conf \ - foo.example.com - -Sometimes, it's useful to be able to do more in-depth troubleshooting -by running the template manually. To do this, run ``bcfg2-info -debug``, and, once in the Python interpreter, run:: - - metadata = self.build_metadata("") - path = "" - -``path`` should be set to the path to the template file with a leading -slash, relative to the Bcfg2 specification root. See `Inside of -Templates`_ for examples. - -Then, run:: - - import os, Bcfg2.Options - from genshi.template import TemplateLoader, NewTextTemplate - name = os.path.dirname(path[path.find('/', 1):]) - setup = Bcfg2.Options.OptionParser({'repo': - Bcfg2.Options.SERVER_REPOSITORY}) - setup.parse('--') - template = TemplateLoader().load(setup['repo'] + path, cls=NewTextTemplate) - print template.generate(metadata=metadata, path=path, name=name).render() - -This gives you more fine-grained control over how your template is -rendered. - -You can also use this approach to render templates that depend on -:ref:`altsrc ` tags by setting -``path`` to the path to the template, and setting ``name`` to the path -to the file to be generated, e.g.:: - - metadata = self.build_metadata("foo.example.com") - path = "/Cfg/etc/sysconfig/network-scripts/ifcfg-template/ifcfg-template.genshi" - name = "/etc/sysconfig/network-scripts/ifcfg-bond0" - -File permissions -================ - -File permissions for entries handled by TGenshi are controlled via the -use of :ref:`server-info` files. Note that you **cannot** use both a -Permissions entry and a Path entry to handle the same file. 
- -Error handling -================ - -Situations may arise where a templated file cannot be generated due to -missing or incomplete information. A TemplateError can be raised to -force a bind failure and prevent sending an incomplete file to the -client. For example, this template:: - - {% python - from genshi.template import TemplateError - grp = None - for g in metadata.groups: - if g.startswith('ganglia-gmond-'): - grp = g - break - else: - raise TemplateError, "Missing group" - %}\ - -will fail to bind if the client is not a member of a group starting with -"ganglia-gmond-". The syslogs on the server will contain this message:: - - bcfg2-server[5957]: Genshi template error: Missing group - bcfg2-server[5957]: Failed to bind entry: Path /etc/ganglia/gmond.conf - -indicating the bind failure and message raised with the TemplateError. - -FAQs -==== - -**Question** - -How do I escape the $ (dollar sign) in a TGenshi text template? For -example, if I want to include SVN (subversion) keywords like $Id$ or -$HeadURL$ in TGenshi-generated files, or am templating a bourne shell -(sh/bash) script or Makefile (make). - -**Answer** - -Use $$ (double dollar sign) to output a literal $ (dollarsign) -in a TGenshi text template. So instead of $Id$, you'd use -$$Id$$. See also Genshi tickets `#282: Document $$ escape -convention `_ and -`#283: Allow for redefinition of template syntax per-file -`_. - -Examples -======== - -.. toctree:: - :glob: - :maxdepth: 1 - - examples/genshi/* diff -Nru bcfg2-1.3.5/doc/server/plugins/grouping/ldap.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/grouping/ldap.txt --- bcfg2-1.3.5/doc/server/plugins/grouping/ldap.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/grouping/ldap.txt 2017-01-10 19:18:17.000000000 +0000 @@ -33,39 +33,38 @@ Configuration ------------- -As processing LDAP search results can get pretty complex, the configuration has +As processing LDAP search results can get pretty complex, the configuration has to be written in Python. Here is a minimal example to get you started:: - from Bcfg2.Server.Plugins.Ldap import LdapConnection, LdapQuery, LdapSubQuery, register_query - - conn_default = LdapConnection() - conn_default.binddn = "uid=example,ou=People,dc=example,dc=com" - conn_default.bindpw = "foobat" - - @register_query + from Bcfg2.Server.Plugins.Ldap import LdapConnection, LdapQuery + + __queries__ = ['ExampleQuery'] + + conn_default = LdapConnection( + binddn="uid=example,ou=People,dc=example,dc=com", + bindpw = "foobat") + class ExampleQuery(LdapQuery): - name = "example" base = "ou=People,dc=example,dc=com" scope = "one" attrs = ["cn", "uid"] connection = conn_default - + def prepare_query(self, metadata): self.filter = "(personalServer=" + metadata.hostname + ")" - + def process_result(self, metadata): if not self.result: admin_uid = None admin_name = "This server has no admin." - return { + return { "admin_uid" : self.result[0][1]["uid"], "admin_name" : self.result[0][1]["cn"] } -The first line provides three classes for dealing with connections and queries -(details below) and a decorator function for registering your queries with the plugin. +The first line provides the two required classes for dealing with connections and queries. In this example our LDAP directory has a number of user objects in it. Each of those may have a personal server they administer. 
Whenever metadata for this machine is being @@ -73,7 +72,20 @@ In your bundles and config templates, you can access this data via the metadata object:: - ${metadata.Ldap["example"]["admin_name"]} + ${metadata.Ldap["ExampleQuery"]["admin_name"]} + +Connection retry +++++++++++++++++ + +If the LDAP server is down during a request, the LDAP plugin tries to reconnect after a +short delay. By default, it waits 3 seconds during the retries and tries to reconnect +up to three times. + +If you wish, you could customize these values in your ``bcfg2.conf``:: + + [ldap] + retries = 3 + retry_delay = 3.0 Class reference --------------- @@ -83,23 +95,23 @@ .. class:: LdapConnection - This class represents an LDAP connection. Every query must be associated with exactly + This class represents an LDAP connection. Every query must be associated with exactly one connection. - -.. attribute:: LdapConnection.binddn - + +.. attribute:: LdapConnection.binddn + DN used to authenticate against LDAP (required). - + .. attribute:: LdapConnection.bindpw - + Password for the previously mentioned **binddn** (required). - + .. attribute:: LdapConnection.host - + Hostname of host running the LDAP server (defaults to "localhost"). .. attribute:: LdapConnection.port - + Port where LDAP server is listening (defaults to 389). You may pass any of these attributes as keyword arguments when creating the connection object. @@ -108,143 +120,140 @@ +++++++++ .. class:: LdapQuery - + This class defines a single query that may adapt itself depending on the current metadata. .. attribute:: LdapQuery.attrs - + Can be used to retrieve only a certain subset of attributes. May either be a list of strings (attribute names) or ``None``, meaning all attributes (defaults to ``None``). .. attribute:: LdapQuery.base - - This is the search base. Only LDAP entries below this DN will be included in your + + This is the search base. Only LDAP entries below this DN will be included in your search results (required). - + .. attribute:: LdapQuery.connection - + Set this to an instance of the LdapConnection class (required). .. attribute:: LdapQuery.filter - + LDAP search filter used to narrow down search results (defaults to ``(objectClass=*)``). .. attribute:: LdapQuery.name - + This will be used as the dictionary key that provides access to the query results from - the metadata object (``metadata.Ldap["NAMEGOESHERE"]``) (required). + the metadata object: ``metadata.Ldap["NAMEGOESHERE"]`` (defaults to the class name). .. attribute:: LdapQuery.scope - - Set this to one of "base", "one" or "sub" to specify LDAP search depth (defaults to "sub"). + + Set this to one of "base", "one" or "sub" to specify LDAP search depth (defaults to "sub"). .. method:: LdapQuery.is_applicable(self, metadata) - + You can override this method to indicate whether this query makes sense for a given set of metadata (e.g. you need a query only for a certain bundle or group). - + (defaults to returning True) - -.. method:: LdapQuery.prepare_query(self, metadata) - + +.. method:: LdapQuery.prepare_query(self, metadata, \**kwargs) + Override this method to alter the query prior to execution. This is useful if your filter depends on the current metadata, e.g.:: - + self.filter = "(cn=" + metadata.hostname + ")" - + (defaults to doing nothing) -.. method:: LdapQuery.process_result(self, metadata) - +.. method:: LdapQuery.process_result(self, metadata, \**kwargs) + You will probably override this method in every query to reformat the results from LDAP. 
The raw result is stored in ``self.result``, you must return the altered data. Note that LDAP search results are presented in this structure:: - + ( ("DN of first entry returned", { "firstAttribute" : 1, "secondAttribute" : 2, - } + } ), ("DN of second entry returned", { "firstAttribute" : 1, "secondAttribute" : 2, - } + } ), ) - + Therefore, to return just the value of the firstAttribute of the second object returned, you'd write:: - + return self.result[1][1][0] - + (defaults to returning ``self.result`` unaltered) -LdapSubQuery -++++++++++++ +.. method:: LdapQuery.get_result(self, metadata, \**kwargs) + + This executes the query. First it will call ``prepare_query()`` for you, then it will try + to execute the query with the specified connection and last it will call ``process_result()`` + and return that return value. -.. class:: LdapSubQuery +If you use a LdapQuery class by yourself, you could pass additional keyword arguments to +``get_result()``. It will call ``prepare_query()`` and ``process_result()`` for you and +also supply this additional arguments to this methods. - Sometimes you need more than one query to obtain the data you need (e.g. use the first - query to return all websites running on metadata.hostname and another query to find all - customers that should have access to those sites). - - LdapSubQueries are the same as LdapQueries, except for that the methods - - * ``get_result()`` - * ``prepare_query()`` - * ``process_result()`` - - allow any additional keyword arguments that may contain additional data as needed. Note - that ``get_result()`` will call ``prepare_query()`` and ``process_result()`` for you, - so you shouldn't ever need to invoke these yourself, just override them. - -Here is another example that uses LdapSubQuery:: +Here is an example:: - class WebSitesQuery(LdapSubQuery): - name = "web_sites" + __queries__ = ['WebPackageQuery'] + + class WebSitesQuery(LdapQuery): filter = "(objectClass=webHostingSite)" attrs = ["dc"] connection = conn_default - + def prepare_query(self, metadata, base_dn): self.base = base_dn - - def process_result(self, metadata): + + def process_result(self, metadata, **kwargs): [...] # build sites dict from returned dc attributes return sites - - @register_query + class WebPackagesQuery(LdapQuery): - name = "web_packages" base = "dc=example,dc=com" attrs = ["customerId"] connection = conn_default - + def prepare_query(self, metadata): self.filter = "(&(objectClass=webHostingPackage)(cn:dn:=" + metadata.hostname + "))" - + def process_result(self, metadata): customers = {} for customer in self.result: dn = customer[0] cid = customer[1]["customerId"][0] - customers[cid]["sites"] = WebSitesQuery().get_result(metadata, base_dn = dn) + customers[cid]["sites"] = WebSitesQuery().get_result(metadata, base_dn=dn) return customers This example assumes that we have a number of webhosting packages that contain various -sites. We need a first query ("web_packages") to get a list of the packages our customers -have and another query for each of those to find out what sites are contained in each -package. The magic happens in the second class where ``WebSitesQuery.get_result()`` is -called with the additional ``base_dn`` parameter that allows our LdapSubQuery to only +sites. We need the ``WebPackagesQuery`` to get a list of the packages our customers +have and another query for each of those to find out what sites are contained in each +package. 
The magic happens in the second class where ``WebSitesQuery.get_result()`` is +called with the additional ``base_dn`` parameter that allows our LdapQuery to only search below that DN. -.. warning:: - Do NOT apply the ``register_query`` decorator to LdapSubQueries. +You do not need to add all LdapQueries to the ``__queries__`` list. Only add those to +that list, that should be called automatically and whose results should be added to the +client metadata. Known Issues ------------ * At this point there is no support for SSL/TLS. +* This module could not know, if a value changed on the LDAP server. So it could not + expire the client metadata cache sanely. + If you are using aggressive caching mode, this plugin will expire the metadata cache + for a single client at the start of a client run. If you are using LDAP data from + another client in a template, you will probably get the cached values from the last + client run of that other client. diff -Nru bcfg2-1.3.5/doc/server/plugins/grouping/metadata.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/grouping/metadata.txt --- bcfg2-1.3.5/doc/server/plugins/grouping/metadata.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/grouping/metadata.txt 2017-01-10 19:18:17.000000000 +0000 @@ -90,6 +90,8 @@ The `clients.xml`_-based model remains the default. +.. _server-plugins-grouping-metadata-groups-xml: + groups.xml ========== @@ -173,6 +175,9 @@ +Negated groups can also be used to declare other Group assignments, +but not to declare Bundle assignments. + .. note:: Nested Group conditionals, Client tags, and negated Group tags are @@ -180,76 +185,6 @@ .. xml:schema:: metadata.xsd - -XInclude -======== - -.. versionadded:: 0.9.0 - -`XInclude `_ is a W3C specification -for the inclusion of external XML documents into XML source files, -allowing complex definitions to be split into smaller, more manageable -pieces. The `Metadata`_ plugin supports the use of XInclude -specifications to split the `clients.xml`_ and `groups.xml`_ -files. This mechanism allows the following specification to produce -useful results: - -.. code-block:: xml - - - - - - -Each of the included groups files has the same format. These files are -properly validated by ``bcfg2-lint``. This mechanism is useful for -composing group definitions from multiple sources, or setting -different permissions in an svn repository. - -You can also optionally include a file that may or may not exist with -the ``fallback`` tag: - -.. code-block:: xml - - - - - - -In this case, if ``their-groups.xml`` does not exist, no error will be -raised and everything will work fine. (You can also use ``fallback`` -to include a different file, or explicit content in the case that the -parent include does not exist.) - -Wildcard XInclude ------------------ - -.. versionadded:: 1.3.1 - -Bcfg2 supports an extension to XInclude that allows you to use shell -globbing in the hrefs. (Stock XInclude doesn't support this, since -the href is supposed to be a URL.) - -For instance: - -.. code-block:: xml - - - - - -This would include all ``*.xml`` files in the ``groups`` subdirectory. - -Note that if a glob finds no files, that is treated the same as if a -single included file does not exist. You should use the ``fallback`` -tag, described above, if a glob may potentially find no files. - -Probes -====== - -The metadata plugin includes client-side probing functionality. This -is fully documented :ref:`here `. 
- Metadata Caching ================ diff -Nru bcfg2-1.3.5/doc/server/plugins/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/index.txt --- bcfg2-1.3.5/doc/server/plugins/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ .. -*- mode: rst -*- +.. vim: ft=rst .. _server-plugins-index: @@ -13,7 +14,7 @@ #. Generating configuration entry contents for clients #. Probing client-side state (like hardware inventory, etc) -- the generic client probing mechanism is described at - :ref:`server-plugins-probes-index`. + :ref:`server-plugins-probes`. #. Automating administrative tasks (e.g. :ref:`server-plugins-generators-sshbase` which automates ssh key management) diff -Nru bcfg2-1.3.5/doc/server/plugins/misc/acl.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/misc/acl.txt --- bcfg2-1.3.5/doc/server/plugins/misc/acl.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/misc/acl.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,235 @@ +.. -*- mode: rst -*- + +.. _server-plugins-misc-acl: + +=== +ACL +=== + +The ACL plugin lets you set client communication ACLs to prevent +clients from accessing the full range of exposed XML-RPC methods. + +You can get a list of all exposed methods by running:: + + bcfg2-admin xcmd listMethods + +Note that this will only list methods that are available to the client +this is run from; that is, if the ACL plugin is in place, +``listMethods`` will reflect the ACLs. + +ACLs can be set in two different ways: + +* IP-based ACLs allow you to set ACLs based on client IP address or + CIDR range. +* Metadata-based ACLs allow you to set ACLs based on client hostname, + group membership, or complex combinations thereof. + +IP-based ACLs are much faster, but metadata-based ACLs are often +easier and better. + +If you are not going to use any ACLs, it is recommended that you +disable this plugin because using it can incur a slight performance +hit. If you are using IP-based ACLs but *not* metadata-based ACLs, it +is similarly recommended that you ensure that your IP-based ACL file +ends with an explicit Deny for all clients; this will ensure that +metadata-based ACLs are never checked. If you are using +metadata-based ACLs, :ref:`server-caching` can alleviate most of the +performance penalty. + +Enabling the ACL plugin +======================= + +First, create ``/var/lib/bcfg2/ACL/``. Then, add ``ACL`` to your +``plugins`` list in ``bcfg2.conf``:: + + plugins = Bundler, Cfg, ..., Packages, ACL + +Finally, create ``/var/lib/bcfg2/ACL/ip.xml`` (for `IP-based ACLs`_), +``/var/lib/bcfg2/ACL/metadata.xml`` (for `Metadata-based ACLs`_), or +both. + +IP-based ACLs +============= + +IP-based ACLs allow you to set ACLs based on client IP address or CIDR +range. IP-based ACLs are very fast. If you are using IP-based ACLs +but *not* metadata-based ACLs, it is recommended that you ensure that +your IP-based ACL file ends with an explicit Deny for all clients; +this will ensure that metadata-based ACLs are never checked. + +IP-based ACLs are defined in ``ACL/ip.xml``. The file is parsed +sequentially; the first matching rule applies. Each rule is either +Allow (to allow the client access), Deny (to deny the client access), +or Defer (to defer to `Metadata-based ACLs`_). The last rule in +``ip.xml`` is an implicit default allow for 127.0.0.1, and an implicit +default defer for all other machines. 
+ +If no ``ip.xml`` file exists, then ACL checking will be deferred to +metadata-based ACLs. + +Example +------- + +.. code-block:: xml + + + + + + + + +In this example: + +* The machine at 192.168.1.10 (perhaps the Bcfg2 server) can call all + plugin XML-RPC methods; +* Machines in the 192.168.2.0/24 network cannot assert their own + profiles; +* The machine at 192.168.1.12 (perhaps the Git server) can call the + Git.Update method; +* All machines can call core methods (except 192.168.2.0/24, which can + call all core methods except AssertProfile). + +Implicitly, all machines (except localhost) except 192.168.1.10 are +disallowed access to the plugin methods. + +You can also provide a minimal configuration to try to weed out some +obvious bad requests before doing the more expensive `Metadata-based +ACLs`_. For instance: + +.. code-block:: xml + + + + + + + +In this example: + +* All machines can call all core methods without checking metadata + ACLs; +* Plugin method calls from machines in 192.168.1.0/24 are deferred to + metadata ACLs; and +* All other plugin method calls are denied. + +The only time metadata ACLs would be checked in this example would be +plugin method calls by machines in 192.168.1.0/24. + +Reference +--------- + +.. xml:type: IPACLContainerType + +Metadata-based ACLs +=================== + +Metadata-based ACLs let you set ACLs based on client hostname or group +membership, which is much more flexible and maintainable than +`IP-based ACLs`_. The downside is that it is slower, because it +requires generating client metadata for each machine that tries to +authenticate. Without :ref:`server-caching`, using metadata-based +ACLs will double the number of client metadata builds per client run, +which could be a sizeable performance penalty. + +In order to limit the performance penalty, it's highly recommended +to: + +* Enable :ref:`server-caching` in ``cautious`` or ``aggressive`` mode; + and +* Deny as many clients as possible with `IP-based ACLs`_. + +Metadata-based ACLs are defined in ``ACL/metadata.xml``. Only Allow +and Deny rules are supported, not Defer rules. The file is parsed +sequentially; the first matching rule applies. The last rule in +``metadata.xml`` is an implicit default allow for machines called +``localhost`` or ``localhost.localdomain``, and an implicit default +deny for all other machines. + +If no ``metadata.xml`` file exists, then all requests are implicitly +allowed. + +Example +------- + +This example is functionally identical to the `IP-based ACLs` example +above, but more maintainable in several ways: + +.. code-block:: xml + + + + + + + + + + + + + + +In this case, if you add a Bcfg2 server or Git server, or one of those +servers changes IP address, you don't need to rewrite your ACLs. +Similarly, you could add a new subnet of user workstations. + +Reference +--------- + +.. xml:type: MetadataACLContainerType + +.. _server-plugins-misc-acl-wildcards: + +Wildcards +========= + +The ACL descriptions allow you to use '*' as a wildcard for any number +of characters *other than* ``.``. That is: + +* ``*`` would match ``DeclareVersion`` and ``GetProbes``, but would + *not* match ``Git.Update``. +* ``*.*`` would match ``Git.Update``, but not ``DeclareVersion`` or + ``GetProbes``. + +Since all plugin methods are scoped to their plugin (i.e., they are +all ``.``), and all core methods have no +scope, this lets you easily allow or deny core or plugin methods. 
You +could also do something like ``*.toggle_debug`` to allow a host to +enable or disable debugging for all plugins. + +No other bash globbing is supported. + +Examples +======== + +The :ref:`default ACL list ` can be described +in ``ip.xml`` fairly simply: + +.. code-block:: xml + + + + + + + + + + +A basic configuration that is still very secure but perhaps more +functional could be given in ``metadata.xml``: + +.. code-block:: xml + + + + + + + + + + + diff -Nru bcfg2-1.3.5/doc/server/plugins/probes/fileprobes.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/probes/fileprobes.txt --- bcfg2-1.3.5/doc/server/plugins/probes/fileprobes.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/probes/fileprobes.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,3 +1,5 @@ +.. -*- mode: rst -*- + .. _server-plugins-probes-fileprobes: ========== diff -Nru bcfg2-1.3.5/doc/server/plugins/probes/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/probes/index.txt --- bcfg2-1.3.5/doc/server/plugins/probes/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/probes/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,6 +1,7 @@ .. -*- mode: rst -*- +.. vim: ft=rst -.. _server-plugins-probes-index: +.. _server-plugins-probes: ====== Probes @@ -14,7 +15,7 @@ we will look at how to do this. Probes also allow dynamic group assignment for clients, see -:ref:`_server-plugins-probes-dynamic-groups`. +:ref:`server-plugins-probes-dynamic-groups`. First, create a ``Probes`` directory in our toplevel repository location:: diff -Nru bcfg2-1.3.5/doc/server/plugins/statistics/reporting.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/statistics/reporting.txt --- bcfg2-1.3.5/doc/server/plugins/statistics/reporting.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/statistics/reporting.txt 2017-01-10 19:18:17.000000000 +0000 @@ -9,7 +9,7 @@ Reporting can be enabled by adding Reporting to the plugins line in ``/etc/bcfg2.conf``: - plugins = Base,Bundler,Cfg,...,Reporting + plugins = Bundler,Cfg,...,Reporting For more information on how to use Reporting to setup reporting, see :ref:`reports-dynamic`. diff -Nru bcfg2-1.3.5/doc/server/plugins/statistics/statistics.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/statistics/statistics.txt --- bcfg2-1.3.5/doc/server/plugins/statistics/statistics.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/statistics/statistics.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-statistics-statistics: - -========== -Statistics -========== diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/altsrc.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/altsrc.txt --- bcfg2-1.3.5/doc/server/plugins/structures/altsrc.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/altsrc.txt 2017-01-10 19:18:17.000000000 +0000 @@ -11,7 +11,7 @@ Altsrc is a generic, Bcfg2 server-side mechanism for performing configuration entry name remapping for the purpose of data binding. Altsrc can be used as a parameter for any entry type, and can be used -in any structure, including Bundler and Base. +in any structure. Use Cases ========= @@ -36,7 +36,7 @@ .. code-block:: xml - + @@ -58,7 +58,7 @@ .. code-block:: xml - + @@ -76,7 +76,7 @@ .. 
code-block:: xml - + ... @@ -97,7 +97,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/base.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/base.txt --- bcfg2-1.3.5/doc/server/plugins/structures/base.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/base.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-plugins-structures-base: - -==== -Base -==== - -.. deprecated:: 1.2.0 - -.. warning:: - - The Base plugin no longer receives new features/functionality. - Please use :ref:`server-plugins-structures-bundler-index` instead. - -The Base plugin is a structure plugin that provides the ability to add -lists of unrelated entries into client configuration entry inventories. - -Base works much like Bundler in its file format. The main difference -between Base and Bundler is that Base files are included in all clients' -configuration whereas bundles must be included explicitly in your -Metadata. See the :ref:`server-plugins-structures-bundler-index` page -for details. - -If you have lots of unconnected items (for instance: software packages -whose configuration wasn't modified, and that are also not depended -on by other packages; or single directories or files not belonging -to a package), using Bundles in Metadata would clutter or enlarge -your ``Metadata/groups.xml`` file, because they all would need to be -explicitly specified. ``Base/`` on the other hand is the perfect place -to put these items. - -Without using Base, you would be forced to put them directly -into your group definitions in ``groups.xml``, either as many -small bundles (substantially enlarging it) or into something like -``Bundler/unrelated-entries.xml``. Using the latter is especially bad -if you mix packages and services in your Bundle, since for any updated -package in that bundle, the now-related services would be restarted. - -The Base entries can still be assigned based on group membership, but when -they aren't part of a group, each and every client gets the entry. So Base is -also a great place to put entries that a large number of your clients will -get. - -For example, you could have a file ``Base/packages.xml`` - -.. code-block:: xml - - - - - [...] - - - - - - - [...] - - - -.. note:: - - You don't have to reference to the files in Base from anywhere. As long - as you include ``Base`` in your ``plugins = ...`` line in ``bcfg2.conf``, - these are included automatically. - -.. note:: - - Your Base files have to match the pattern ``Base/*.xml`` to be included. - - -The decision when to use Base and when to use Bundler depends on the -configuration entry in question, and what you are trying to achieve. - -Base is mainly used for cases where you don't want/need to explicitly -include particular configuration items. Let's say all your machines are -various linux distributions. In this case, you may want to manage the -``/etc/hosts`` file using Base instead of Bundler since you will not have -to include any Bundles in your Metadata. However, you could alternatively -have a base 'linux' group that all the clients inherit which includes a -*linux* Bundle with the ``/etc/hosts`` configuration entry. 
diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/bcfg2.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/bcfg2.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/bcfg2.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/bcfg2.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,87 @@ +.. -*- mode: rst -*- + +.. _server-plugins-structures-bundler-bcfg2-server: + +Bcfg2 Server +============ + +These two bundles split out the entries that do require a restart of +``bcfg2-server`` from those that don't. + +These bundles also demonstrate use of bound entries to avoid splitting +entries between Bundler and Rules. + +``Bundler/bcfg2-server.xml``: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + +``Bundler/bcfg2-server-base.xml``: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/index.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -1,7 +1,7 @@ .. -*- mode: rst -*- .. vim: ft=rst -.. _server-plugins-structures-bundler-index: +.. _server-plugins-structures-bundler: ======= Bundler @@ -20,142 +20,118 @@ Group and Client tags can be used inside of bundles to differentiate which entries particular clients will recieve; this is useful for the case where entries are named differently across systems; for example, -one linux distro may have a package called openssh while another uses -the name ssh. Configuration entries nested inside of Group elements -only apply to clients who are a member of those groups; multiple -nested groups must all apply. Also, groups may be negated; entries -included in such groups will only apply to clients who are not a -member of said group. The same applies to Client elements. +one Linux distro may have a package called ``openssh`` while another +uses the name ``ssh``. See :ref:`xml-group-client-tags` for details +and a longer example. -The following is an annotated copy of a bundle: +A brief example: .. code-block:: xml - - - - - - - - + - - - - - - - - - + - - - -In this bundle, most of the entries are common to all systems. Clients -in group **deb** get one extra package and service, while clients in -group **rpm** get two extra packages and an extra service. In -addition, clients in group **fedora** *and* group **rpm** get one -extra package entries, unless they are not in the **fc14** group, in -which case, they get an extra package. The client -**trust.example.com** gets one extra file that is not distributed to -any other clients. Notice that this file doesn't describe which -versions of these entries that clients should get, only that they -should get them. 
(Admittedly, this example is slightly contrived, but -demonstrates how group entries can be used in bundles) - -+----------------------------+-------------------------------+ -| Group/Hostname | Entry | -+============================+===============================+ -| all | /etc/ssh/ssh_host_dsa_key | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_host_rsa_key | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_host_dsa_key.pub | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_host_rsa_key.pub | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_host_key | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_host_key.pub | -+----------------------------+-------------------------------+ -| all | /etc/ssh/sshd_config | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_config | -+----------------------------+-------------------------------+ -| all | /etc/ssh/ssh_known_hosts | -+----------------------------+-------------------------------+ -| rpm | Package openssh | -+----------------------------+-------------------------------+ -| rpm | Package openssh-askpass | -+----------------------------+-------------------------------+ -| rpm | Service sshd | -+----------------------------+-------------------------------+ -| rpm and fedora | Package openssh-server | -+----------------------------+-------------------------------+ -| rpm and fedora and not fc4 | Package openssh-clients | -+----------------------------+-------------------------------+ -| deb | Package ssh | -+----------------------------+-------------------------------+ -| deb | Service ssh | -+----------------------------+-------------------------------+ -| trust.example.com | /etc/ssh/shosts.equiv | -+----------------------------+-------------------------------+ +Note that we do not specify *how* a given entry should be managed, +only that it should be. The concrete specification of each entry will +be provided by a different plugin such as +:ref:`server-plugins-generators-cfg`, +:ref:`server-plugins-generators-rules`, or +:ref:`server-plugins-generators-packages`. -Genshi templates -================ +Alternatively, you can use fully-bound entries in Bundler, which has +various uses. For instance: -Genshi XML templates allow you to use the `Genshi -`_ templating system to dynamically generate -a bundle. Genshi templates can be specified **one** of two ways: +.. code-block:: xml -* Add an XML-style genshi template to the Bundler directory with a - ``.genshi`` and the associated namespace attribute. -* Simply add the appropriate namespace attribute to your existing XML - bundle. + + + + + + + + + + + -The top-level Bundle tag should look like the following:: +In this example, both Service tags and one Package tag are fully bound +-- i.e., all information required by the client to manage those +entries is provided in the bundle itself. - +.. 
_server-plugins-structures-bundler-magic: -Several variables are pre-defined inside templates: +Bundle "Magic" +============== -+-------------+--------------------------------------------------------+ -| Name | Description | -+=============+========================================================+ -| metadata | :ref:`Client metadata | -| | ` | -+-------------+--------------------------------------------------------+ -| repo | The path to the Bcfg2 repository on the filesystem | -+-------------+--------------------------------------------------------+ +Bundles are collections of *related* entries. That point is very, +very important, because a bundle performs certain "magic" actions when +one or more entries in it are modified: + +* :xml:type:`Service ` entries whose ``restart`` + attribute is ``true`` (the default) will be restarted. +* :xml:type:`Action ` entries whose ``when`` attribute is + ``modified`` will be run. + +Because of these two magic actions, it's extremely important to +structure your bundles around Service and Action entries, rather than +around some loose idea of which entries are related. For instance, in +order to manage a Bcfg2 server, a number of packages, paths, services, +etc. must be managed. But not all of these entries would require +``bcfg2-server`` to be restarted, so to limit restarts it's wise to +split these entries into two bundles. See +:ref:`server-plugins-structures-bundler-bcfg2-server` for an example +of this. -.. note:: - ```` and ```` tags are allowed inside of Genshi - templates as of Bcfg2 1.2. However, they do not behave the same - as using a Genshi conditional, e.g.:: - - - - - The conditional is evaluated when the template is rendered, so - code inside the conditional is not executed if the conditional - fails. A ```` tag is evaluated *after* the template is - rendered, so code inside the tag is always executed. This is an - important distinction: if you have code that will fail on some - groups, you *must* use a Genshi conditional, not a ```` - tag. The same caveats apply to ```` tags. +.. _server-plugins-structures-bundler-index-disabling-magic: + +Disabling Magic +--------------- + +Disabling magic bundler actions can be done in one of two ways: + +* On a per-entry basis. Set ``restart="false"`` on a Service to + prevent it from being restarted when the bundle is modified. Set + ``when="always"`` on an Action to cause it to run every time, + regardless of whether or not the bundle was modified. +* On a per-bundle basis. Set ``independent="true"`` on the top-level + ``Bundle`` tag to signify that the bundle is a collection of + independent (i.e., unrelated) entries, and to prevent any magic + actions from being performed. (This is similar to the ``Base`` + plugin in older versions of Bcfg2.) This was added in Bcfg2 1.4. + +Service entries in independent bundles are never restarted, and Action +entries in independent bundles are only executed if ``when="always"``. +(I.e., an Action entry in an independent bundle with +``when="modified"`` is useless.) + + +.. _server-plugins-structures-bundler-index-genshi-templates: + +Genshi templates +================ + +Genshi XML templates allow you to use the `Genshi +`_ templating system to dynamically +generate a bundle. Genshi templates can be specified one of two ways: + +1. Add an XML-style genshi template to the Bundler directory with a + ``.genshi`` and the associated namespace attribute. *This is + deprecated as of Bcfg2 1.4.* +2. Add the Genshi namespace to your existing XML + bundle. 
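A hedged sketch of the second approach, reusing the host and package names from the ssh bundle example elsewhere in these docs (the exact entries are illustrative):

.. code-block:: xml

    <Bundle xmlns:py="http://genshi.edgewall.org/">
      <Package name="openssh"/>
      <!-- rendered only for the matching client -->
      <Path py:if="metadata.hostname == 'trust.example.com'" name="/etc/ssh/shosts.equiv"/>
      <!-- loop over a list of related packages -->
      <Package py:for="pkg in ['openssh-clients', 'openssh-server']" name="${pkg}"/>
    </Bundle>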
-See also the :ref:`xml-genshi-reference`. +See :ref:`xml-genshi-templating` for details. Troubleshooting --------------- @@ -169,6 +145,63 @@ See :ref:`bcfg2-info ` for more details. + +.. _server-plugins-structures-bundler-index-dependencies: + +Dependencies +============ + +Dependencies on other bundles can be specified by adding a +RequiredBundle tag that adds another bundle by name, e.g.: + +.. code-block:: xml + + + + ... + + +The dependent bundle is added to the list of bundles sent to the +client, *not* to the parent bundle itself. If you want to propagate +the modification flag from the required bundle, you can add +``inherit_modification="true"`` to the RequiredBundle tag. +An example: + +``nfs-client.xml``: + +.. code-block:: xml + + + + + + + + +``automount.xml``: + +.. code-block:: xml + + + + + + + + + + + +If a new ``nfs-utils`` package was installed, the ``nfslock``, +``rpcbind``, and ``nfs`` services would be restarted, but *not* the +``autofs`` service. If you would add ``inherit_modification="true"`` +to the RequiredBundle tag, you would ensure the propagation of the +modification flag and the ``autofs`` service would be restarted, +too. But if a new ``/etc/auto.misc`` file was sent out, *only* the +``autofs`` service would be restarted, but the ``nfslock``, +``rpcbind``, and ``nfs`` services would not be restarted +(independent of the ``inherit_modification`` flag). + Altsrc ====== @@ -185,8 +218,8 @@ .. code-block:: xml - - + + Depending on the circumstance, these configuration files can either be @@ -200,7 +233,7 @@ .. code-block:: xml - + @@ -220,7 +253,7 @@ .. code-block:: xml - + @@ -232,11 +265,11 @@ .. code-block:: xml - + - + @@ -245,7 +278,7 @@ .. code-block:: xml - + @@ -275,6 +308,7 @@ .. toctree:: :maxdepth: 1 + bcfg2 kernel moab nagios diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/kernel.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/kernel.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/kernel.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/kernel.txt 2017-01-10 19:18:17.000000000 +0000 @@ -22,7 +22,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/moab.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/moab.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/moab.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/moab.txt 2017-01-10 19:18:17.000000000 +0000 @@ -9,7 +9,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/nagios.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/nagios.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/nagios.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/nagios.txt 2017-01-10 19:18:17.000000000 +0000 @@ -12,7 +12,7 @@ .. code-block:: xml - + @@ -27,29 +27,14 @@ - - - - - - - - - - - - - + - - - - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/ntp.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/ntp.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/ntp.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/ntp.txt 2017-01-10 19:18:17.000000000 +0000 @@ -12,7 +12,7 @@ .. 
code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/snmpd.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/snmpd.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/snmpd.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/snmpd.txt 2017-01-10 19:18:17.000000000 +0000 @@ -10,7 +10,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/torque.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/torque.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/torque.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/torque.txt 2017-01-10 19:18:17.000000000 +0000 @@ -11,7 +11,7 @@ .. code-block:: xml - + @@ -29,9 +29,7 @@ - - - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/bundler/yp.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/yp.txt --- bcfg2-1.3.5/doc/server/plugins/structures/bundler/yp.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/bundler/yp.txt 2017-01-10 19:18:17.000000000 +0000 @@ -14,7 +14,7 @@ .. code-block:: xml - + diff -Nru bcfg2-1.3.5/doc/server/plugins/structures/defaults.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/defaults.txt --- bcfg2-1.3.5/doc/server/plugins/structures/defaults.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/structures/defaults.txt 2017-01-10 19:18:17.000000000 +0000 @@ -29,3 +29,10 @@ If you were to specify a ``type`` attribute for a Service entry in Rules (or a ``type`` attribute for a BoundService entry in Bundler), that would take precendence over the default. + +Like :ref:`server-plugins-generators-rules`, Defaults can also replace +``%{name}`` in attributes with the real name of the entry. To enable this, +add the following setting to ``bcfg2.conf``:: + + [defaults] + replace_name = yes diff -Nru bcfg2-1.3.5/doc/server/plugins/version/bzr.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/bzr.txt --- bcfg2-1.3.5/doc/server/plugins/version/bzr.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/bzr.txt 2017-01-10 19:18:17.000000000 +0000 @@ -21,7 +21,7 @@ Simply add "Bzr" to your plugins line in ``/etc/bcfg2.conf``:: [server] - plugins = Base,Bundler,Cfg,...,Bzr + plugins = Bundler,Cfg,...,Bzr Usage notes =========== diff -Nru bcfg2-1.3.5/doc/server/plugins/version/cvs.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/cvs.txt --- bcfg2-1.3.5/doc/server/plugins/version/cvs.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/cvs.txt 2017-01-10 19:18:17.000000000 +0000 @@ -21,4 +21,4 @@ Simply add "Cvs" to your plugins line in ``/etc/bcfg2.conf``:: [server] - plugins = Base,Bundler,Cfg,...,Cvs + plugins = Bundler,Cfg,...,Cvs diff -Nru bcfg2-1.3.5/doc/server/plugins/version/darcs.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/darcs.txt --- bcfg2-1.3.5/doc/server/plugins/version/darcs.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/darcs.txt 2017-01-10 19:18:17.000000000 +0000 @@ -6,7 +6,7 @@ Darcs ===== -This page describes the new Darcs plugin which is experimental. 
+This page describes the new Darcs plugin which is experimental. Why use the Darcs plugin ======================== @@ -25,4 +25,4 @@ simply add Darcs to your plugins line in ``/etc/bcfg2.conf``:: [server] - plugins = Base,Bundler,Cfg,...,Darcs + plugins = Bundler,Cfg,...,Darcs diff -Nru bcfg2-1.3.5/doc/server/plugins/version/fossil.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/fossil.txt --- bcfg2-1.3.5/doc/server/plugins/version/fossil.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/fossil.txt 2017-01-10 19:18:17.000000000 +0000 @@ -21,4 +21,4 @@ Simply add "Fossil" to your plugins line in ``/etc/bcfg2.conf``:: [server] - plugins = Base,Bundler,Cfg,...,Fossil + plugins = Bundler,Cfg,...,Fossil diff -Nru bcfg2-1.3.5/doc/server/plugins/version/hg.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/hg.txt --- bcfg2-1.3.5/doc/server/plugins/version/hg.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/plugins/version/hg.txt 2017-01-10 19:18:17.000000000 +0000 @@ -22,4 +22,4 @@ Simply add Hg to your plugins line in ``/etc/bcfg2.conf``:: [server] - plugins = Base,Bundler,Cfg,...,Hg + plugins = Bundler,Cfg,...,Hg diff -Nru bcfg2-1.3.5/doc/server/snapshots/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/snapshots/index.txt --- bcfg2-1.3.5/doc/server/snapshots/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/snapshots/index.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,155 +0,0 @@ -.. -*- mode: rst -*- - -.. _server-snapshots-index: - -=============== -Bcfg2 Snapshots -=============== - -.. versionadded:: 1.0.0 - -This page describes the Snapshots plugin. Snapshots is deprecated, and -will be removed in a future release. - -Before you begin -================ - -Make sure you have version 0.5 or greater of sqlalchemy. - -On CentOS/RHEL 5 ----------------- - -* Download a tarball of SQLAlchemy. -* Extract and build the RPM:: - - tar xzf SQLAlchemy-0.5.6.tar.gz - cd SQLAlchemy-0.5.6 - python setup.py bdist_rpm - -* Copy the RPM in ``SQLAlchemy-0.5.6/dist/`` to your Yum repository, - and rebuild the repository using ``createrepo``. -* Clear the Yum cache:: - - sudo yum clean all - -* Install SQLAlchemy:: - - sudo yum install SQLAlchemy - -* Manage the package in Bcfg2 as you would any other package. - -Configuration -============= - -* A database location needs to be added to ``bcfg2.conf``. Three drivers - are currently supported; mysql, postgres, and sqlite. When using the - sqlite driver, only the driver and database lines are required. - - * For MySQL:: - - [snapshots] - driver = mysql - database = snapshots - user = snapshots - password = snapshots - host = dbserver - - * For SQLite:: - - [snapshots] - driver = sqlite - database = /var/lib/bcfg2/var/snapshots.sqlite - -* The database needs to be initialized.:: - - $ bcfg2-admin snapshots init - 2009-03-22 21:40:24,683 INFO sqlalchemy.engine.base.Engine.0x...3e2c PRAGMA table_info("connkeyval") - PRAGMA table_info("connkeyval") - 2009-03-22 21:40:24,684 INFO sqlalchemy.engine.base.Engine.0x...3e2c () - () - 2009-03-22 21:40:24,686 INFO sqlalchemy.engine.base.Engine.0x...3e2c PRAGMA table_info("package") - PRAGMA table_info("package") - 2009-03-22 21:40:24,687 INFO sqlalchemy.engine.base.Engine.0x...3e2c () - () - ..... 
- COMMIT - -* The Snapshots plugin needs to be enabled for the bcfg2-server (by adding - Snapshots to the plugins line in ``/etc/bcfg2.conf``). Once done, - this will cause the the server to store statistics information when - clients run. - -Using the reports interface -=========================== - -All hosts:: - - $ bcfg2-admin snapshots reports -a - - ============= ========= ========================================== ============================ - Client Correct Revision Time - ============= ========= ========================================== ============================ - bcfg2client True f46ac7773712bd3c3cfb765ae5d2a3b2a37ac9b7 2009-04-23 11:27:54.378941 - ============= ========= ========================================== ============================ - -List bad entries for a single host:: - - $ bcfg2-admin snapshots reports -b bcfg2client - Bad entries: - Package:nscd - Package:cupsys - File:/etc/ldap.conf - -List extra entries for a single host:: - - $ bcfg2-admin snapshots reports -e bcfg2client - Extra entries: - Package:python-pyxattr - Package:librsync1 - Package:python-pylibacl - Package:gcc-4.2-multilib - Package:nxlibs - Package:freenx-session-launcher - Package:dx-doc - Package:dirdiff - Package:libhdf4g - Package:nxclient - Package:freenx-rdp - Package:freenx-vnc - Package:libxml2-dev - Package:mysql-client - Package:mysql-client-5.0 - Package:libxcompext3 - Package:lib32gomp1 - Package:dx - Package:freenx-media - Package:dxsamples - Package:gcc-multilib - Package:rdiff-backup - Package:libdbd-mysql-perl - Package:libxcomp3 - Package:freenx-server - Package:smbfs - Package:planner - Package:nxagent - Package:libc6-dev-i386 - Package:libfltk1.1-dev - Package:freenx - Package:libdx4 - Package:libxcompshad3 - Service:freenx-server - -Detailed view of hosts for a particular date:: - - $ bcfg2-admin snapshots reports --date 2009 5 30 - ============= ========= ========================================== ============================ - Client Correct Revision Time - ============= ========= ========================================== ============================ - bcfg2client False 10c1a12c62c57c0861cc453b8d2640c4839a7357 2009-05-29 10:52:34.701056 - -TODO/Wishlist -============= - -* Identify per-client changes in correctness over time -* Detailed view for a particular date -* Track entry changes over time (glibc updated on these dates to these versions) diff -Nru bcfg2-1.3.5/doc/server/xml-common.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/xml-common.txt --- bcfg2-1.3.5/doc/server/xml-common.txt 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/server/xml-common.txt 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,376 @@ +.. -*- mode: rst -*- +.. vim: ft=rst + +.. _xml-features: + +===================== + Common XML Features +===================== + +Most of the XML files in Bcfg2 have a common set of features that are +supported. These are described in some detail below, and a precise +rundown of which features are supported by which files is provided. + +.. _xml-group-client-tags: + +Group and Client tags +===================== + +These allow the portions of an XML document inside a Client or Group +tag to only apply to the given client group. That is, they can be +thought of as conditionals, where the following are roughly equivalent: + +.. code-block:: xml + + + + + +And:: + + If client is a member of group1 then + Manage the abstract path "/etc/foo.conf" + +Nested Group and Client tags are conjunctive (logical ``AND``). 
For +instance, the following are roughly equivalent: + +.. code-block:: xml + + + + + + + + +And:: + + If client is a member of group1 and has hostname "foo.example.com" then + Manage the abstract package "bar" + If client is a member of group1 then + Manage the abstract package "baz" + +There is no convenient ``else``; you must specify all conditions +explicitly. To do this, Group and Client tags may be negated, as in: + +.. code-block:: xml + + + + + + + + +This is roughly equivalent to:: + + If client is a member of group1 then + Manage the abstract service "foo" + If client is not a member of group 1 then + Manage the abstract service "bar" + +Or, more compactly: + + If client is a member of group1 then + Manage the abstract service "foo" + Else + Manage the abstract service "bar" + +As an example, consider the following :ref:`bundle +`: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + +In this bundle, most of the entries are common to all systems. Clients +in group ``deb`` get one extra package and service, while clients in +group ``rpm`` get two extra packages and an extra service. In +addition, clients in group ``fedora`` *and* group ``rpm`` get one +extra package entries, unless they are not in the ``fedora14`` group, +in which case, they get an extra package. The client +``trust.example.com`` gets one extra file that is not distributed to +any other clients. + ++------------------------+-----------------------------------+ +| Group/Hostname | Entry | ++========================+===================================+ +| all | ``/etc/ssh/*`` | ++------------------------+-----------------------------------+ +| ``rpm`` | Package ``openssh`` | ++------------------------+-----------------------------------+ +| ``rpm`` | Package ``openssh-askpass`` | ++------------------------+-----------------------------------+ +| ``rpm`` | Service ``sshd`` | ++------------------------+-----------------------------------+ +| ``rpm`` AND ``fedora`` | Package ``openssh-server`` | ++------------------------+-----------------------------------+ +| ``rpm`` AND ``fedora`` | Package ``openssh-clients`` | +| AND NOT ``fedora14`` | | ++------------------------+-----------------------------------+ +| ``deb`` | Package ``ssh`` | ++------------------------+-----------------------------------+ +| ``deb`` | Service ``ssh`` | ++------------------------+-----------------------------------+ +| ``trust.example.com`` | ``/etc/ssh/shosts.equiv`` | ++------------------------+-----------------------------------+ + +.. _xml-genshi-templating: + +Genshi templating +================= + +Genshi XML templates allow you to use the `Genshi +`_ templating system to dynamically +generate XML file content for a given client. Genshi templating can +be enabled on a file by adding the Genshi namespace to the top-level +tag, e.g.: + +.. code-block:: xml + + + +Several variables are pre-defined inside Genshi XML templates: + ++-------------+--------------------------------------------------------+ +| Name | Description | ++=============+========================================================+ +| metadata | :ref:`Client metadata | +| | ` | ++-------------+--------------------------------------------------------+ +| repo | The path to the Bcfg2 repository on the filesystem | ++-------------+--------------------------------------------------------+ + +.. 
note:: + + ```` and ```` tags can be used inside templates as + of Bcfg2 1.2, but they do not behave the same as using a Genshi + conditional, e.g.:: + + + + + The conditional is evaluated when the template is rendered, so + code inside the conditional is not executed if the conditional + fails. A ```` tag is evaluated *after* the template is + rendered, so code inside the tag is always executed. This is an + important distinction: if you have code that will fail on some + groups, you *must* use a Genshi conditional, not a ```` + tag. The same caveats apply to ```` tags. + +.. _xml-genshi-reference: + +Genshi XML Template Reference +----------------------------- + +The Genshi XML templating language is described in depth at `Genshi +`_. The XML schema reference follows. + +Genshi Tags +~~~~~~~~~~~ + +.. xml:group:: genshiElements + :namespace: py + +Genshi Attributes +~~~~~~~~~~~~~~~~~ + +.. xml:attributegroup:: genshiAttrs + :namespace: py + +.. _xml-encryption: + +Encryption +========== + +You can encrypt data in XML files to protect that data from other +people who need access to the repository. The data is decrypted +transparently on-the-fly by the server. + +.. note:: + + This feature is *not* intended to secure the files against a + malicious attacker who has gained access to your Bcfg2 server, as + the encryption passphrases are held in plaintext in + ``bcfg2.conf``. This is only intended to make it easier to use a + single Bcfg2 repository with multiple admins who should not + necessarily have access to each other's sensitive data. + +XML files are encrypted on a per-element basis; that is, rather than +encrypting the whole file, only the character content of individual +elements is encrypted. This makes it easier to track changes to the +file in a VCS, and also lets unprivileged users work with the other +data in the file. Only character content of an element can be +encrypted; attribute content and XML elements themselves cannot be +encrypted. + +By default, decryption is *strict*; that is, if any element cannot be +decrypted, parsing of the file is aborted. See +:ref:`server-encryption-lax-strict` for information on changing this +on a global or per-file basis. + +To encrypt or decrypt a file, use :ref:`bcfg2-crypt`. + +See :ref:`server-encryption` for more details on encryption in Bcfg2 +in general. + +XInclude +======== + +.. versionadded:: 0.9.0 + +`XInclude `_ is a W3C specification +for the inclusion of external XML documents into XML source files, +allowing complex definitions to be split into smaller, more manageable +pieces. For instance, in the :ref:`server-plugins-grouping-metadata` +``groups.xml`` file, you might do: + +.. code-block:: xml + + + + + + +To enable XInclude on a file, you need only add the XInclude namespace +to the top-level tag. + +You can also *optionally* include a file that may or may not exist +with the ``fallback`` tag: + +.. code-block:: xml + + + + + + +In this case, if ``their-groups.xml`` does not exist, no error will be +raised and everything will work fine. (You can also use ``fallback`` +to include a different file, or explicit content in the case that the +parent include does not exist.) + +XInclude can only include complete, well-formed XML files. In some +cases, it may not be entirely obvious or intuitive how to structure +such an included file to conform to the schema, although in general +the included files should be structure exactly like the parent file. + +Wildcard XInclude +----------------- + +.. 
versionadded:: 1.3.1 + +Bcfg2 supports an extension to XInclude that allows you to use shell +globbing in the hrefs. (Stock XInclude doesn't support this, since +the href is supposed to be a URL.) + +For instance: + +.. code-block:: xml + + + + + +This would include all ``*.xml`` files in the ``groups`` subdirectory. + +Note that if a glob finds no files, that is treated the same as if a +single included file does not exist. You should use the ``fallback`` +tag, described above, if a glob may potentially find no files. + +Feature Matrix +============== + ++---------------------------------------------------+--------------+--------+------------+------------+ +| File | Group/Client | Genshi | Encryption | XInclude | ++===================================================+==============+========+============+============+ +| :ref:`ACL ip.xml ` | No | No | No | Yes | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`ACL metadata.xml | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Bundler | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`info.xml ` | Yes [#f1]_ | Yes | Yes | Yes | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`privkey.xml and pubkey.xml | Yes | Yes | Yes | Yes [#f2]_ | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`authorizedkeys.xml | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`sslcert.xml and sslkey.xml | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Decisions | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Defaults | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`FileProbes | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`GroupPatterns | No | No | No | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Metadata clients.xml | No | No | No | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Metadata groups.xml | Yes [#f3]_ | No | No | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`NagiosGen | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Packages | Yes | Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Pkgmgr | Yes | No | No | No | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Properties | Yes [#f4]_ | 
Yes | Yes | Yes | +| ` | | | | | ++---------------------------------------------------+--------------+--------+------------+------------+ +| :ref:`Rules ` | Yes | Yes | Yes | Yes | ++---------------------------------------------------+--------------+--------+------------+------------+ + +.. rubric:: Footnotes + +.. [#f1] ``info.xml`` also supports conditional Path tags; see + :ref:`server-info` for more. +.. [#f2] XInclude is supported, but the schema has not been modified + to allow including files that are structured exactly like the + parent. You may need to read the schema to understand how to + use XInclude properly. +.. [#f3] The semantics of Group tags in ``groups.xml`` is slightly + different; see + :ref:`server-plugins-grouping-metadata-groups-xml` for + details. +.. [#f4] Group and Client tags in XML Properties are not automatic by + default; they can be resolved by use of either the + ``Match()`` or ``XMLMatch()`` methods, or by use of the + :ref:`server-plugins-connectors-properties-automatch` + feature. See :ref:`server-plugins-connectors-properties-xml` + for details. diff -Nru bcfg2-1.3.5/doc/unsorted/howtos.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/unsorted/howtos.txt --- bcfg2-1.3.5/doc/unsorted/howtos.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/unsorted/howtos.txt 2017-01-10 19:18:17.000000000 +0000 @@ -12,7 +12,7 @@ * AnnotatedExamples - a description of basic Bcfg2 specification operations * BuildingDebianPackages - How to build debian packages * :ref:`appendix-guides-gentoo` - Issues specific to running Bcfg2 on Gentoo -* :ref:`server-plugins-probes-index` - How to use Probes to gather information from a client machine. +* :ref:`server-plugins-probes` - How to use Probes to gather information from a client machine. * :ref:`client-tools-actions` - How to use Actions * :ref:`server-plugins-probes-dynamic-groups` - Using dynamic groups * :ref:`client-modes-paranoid` - How to run an update in paranoid mode diff -Nru bcfg2-1.3.5/doc/unsorted/index.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/unsorted/index.txt --- bcfg2-1.3.5/doc/unsorted/index.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/unsorted/index.txt 2017-01-10 19:18:17.000000000 +0000 @@ -13,7 +13,6 @@ .. _TitleIndex: https://trac.mcs.anl.gov/projects/bcfg2/wiki/TitleIndex -* `Plugins/Snapshots` * `PrecompiledPackages` * `SchemaEvolution` * `SecurityDevPlan` diff -Nru bcfg2-1.3.5/doc/unsorted/vim_snippet.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/unsorted/vim_snippet.txt --- bcfg2-1.3.5/doc/unsorted/vim_snippet.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/doc/unsorted/vim_snippet.txt 2017-01-10 19:18:17.000000000 +0000 @@ -13,7 +13,7 @@ #. Install it using the install instructions (unzip snipMate.zip -d ~/.vim or equivalent, e.g. $HOME\vimfiles on Windows) #. Add the following to ``~/.vim/snippets/xml.snippets`` - .. code-block:: cl + .. code-block:: none # Bundle snippet + but this does not explicitly identify that an RPM package version 0.9.2 should be loaded from http://rpm.repo.server/bcfg2-0.9.2-0.1.rpm. -The abstract configuration is defined in the xml configuration files -for the Base and Bundles plugins. +The abstract configuration is defined in the XML configuration files +for the Bundler plugin. A combination of a clients metadata (group memberships) and abstract configuration is then used to generate the clients literal configuration. 
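As a rough illustration of that split (a sketch, with ``ntp`` standing in for any real entries): the bundle lists abstract entries, and a generator such as Rules later binds the literal details for the client.

.. code-block:: xml

    <!-- Bundler/ntp.xml: abstract entries only -->
    <Bundle>
      <Package name="ntp"/>
      <Service name="ntpd"/>
      <Path name="/etc/ntp.conf"/>
    </Bundle>

    <!-- Rules/services.xml: one possible literal binding for the Service entry -->
    <Rules priority="0">
      <Service name="ntpd" type="chkconfig" status="on"/>
    </Rules>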
@@ -57,35 +57,13 @@ =================================== A clients Abstract Configuration is the inventory of configuration -entities that should be installed on a client. Two plugins provide the -basis for the abstract configuration, the Bundler and Base. +entities that should be installed on a client. The Bundler plugin +usually provides the abstract configuration. The plugin Bundler builds descriptions of interrelated configuration entities. These are typically used for the representation of services, or other complex groups of entities. -The Base provides a laundry list of configuration entities that need to -be installed on hosts. These entities are independent from one another, -and can be installed individually without worrying about the impact on -other entities. - -Usage of Groups in Base and Bundles ------------------------------------ - -Groups are used by the Base and Bundles plugins for selecting -Configuration Entity Types for inclusion in a clients abstract -configuration. They can be thought of as:: - - if client is a member of group1 then - assign to abstract config - -Nested groups are conjunctive (logical and).:: - - if client is a member of group1 and group2 then - assign to abstract config - -Group membership maybe negated. See "Writing Bundles" for an example. - Configuration Entity Types -------------------------- @@ -121,9 +99,8 @@ If any of these pieces are installed or updated, all should be rechecked and any associated services should be restarted. -All files in the Bundles/ subdirectory of the repository are processed. -Each bundle must be defined in its own file and the filename must be the -same as the bundle name with a .xml suffix.:: +All files in the Bundles/ subdirectory of the repository are +processed. Each bundle must be defined in its own file:: # ls Bundler Glide3.xml @@ -144,17 +121,6 @@ atftp.xml .... -Groups can be used inside of bundles to differentiate which entries -particular clients will receive. This is useful for the case where -entries are named differently across systems; for example, one linux -distro may have a package called openssh while another uses the name ssh. -Configuration entries nested inside of Group elements only apply to -clients who are a member of those groups; multiply nested groups must -all apply. - -Also, groups may be negated; entries included in such groups will only -apply to clients who are not a member of said group. - When packages in a bundle are verified by the client toolset, the Paths included in the same bundle are taken into consideration. That is, a package will not fail verification from a Bcfg2 perspective if the @@ -165,16 +131,8 @@ .. 
code-block:: xml - - - - - - - - - - + + @@ -205,23 +163,7 @@ +----------------+-------------------------------+ | Group | Entry | +================+===============================+ -| all | /etc/ssh/ssh_host_dsa_key | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_host_rsa_key | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_host_dsa_key.pub | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_host_rsa_key.pub | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_host_key | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_host_key.pub | -+----------------+-------------------------------+ -| all | /etc/ssh/sshd_config | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_config | -+----------------+-------------------------------+ -| all | /etc/ssh/ssh_known_hosts | +| all | /etc/ssh/* | +----------------+-------------------------------+ | rpm | Package openssh | +----------------+-------------------------------+ @@ -268,26 +210,3 @@ this element is requested by the client, the server dynamically generates it either by crunching data and creating new information or by reading a file off of disk and passes it down to the client for installation. - -Usage of Groups in Generators ------------------------------ - -Similar to Abstract Configuration plugins, groups are used by generator -plugins for selecting Configuration Entities for inclusion in a clients -literal configuration. They can be thought of as:: - - if client is a member of group1 then - assign to abstract config - -Nested groups are conjunctive (logical and).:: - - if client is a member of group1 and group2 then - assign to abstract config - -How the groups are configured is specific to the plugin, but here are -two common methods: - -* xml configuration file (Pkgmgr, Rules) -* file name encoding (Cfg, SSHBase) - -Details are included on each plugin's page. diff -Nru bcfg2-1.3.5/examples/bcfg2.conf bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/bcfg2.conf --- bcfg2-1.3.5/examples/bcfg2.conf 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/bcfg2.conf 2017-01-10 19:18:17.000000000 +0000 @@ -1,5 +1,4 @@ [communication] -protocol = xmlrpc/ssl password = foobat # certificate = /etc/bcfg2.key # key = /etc/bcfg2.key diff -Nru bcfg2-1.3.5/examples/bcfg2.confHostbase bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/bcfg2.confHostbase --- bcfg2-1.3.5/examples/bcfg2.confHostbase 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/bcfg2.confHostbase 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -[server] -repository = /var/lib/bcfg2 -plugins = Bundler,Rules,Metadata,SSHbase,Cfg - -[statistics] -sendmailpath = /usr/sbin/sendmail - -[communication] -protocol = xmlrpc/ssl -password = foobat -key = /etc/bcfg2.key - -[components] -bcfg2 = https://localhost:6789 - -[hostbase] -# postgresql, mysql, sqlite3 or ado_mssql -database_engine = mysql -# Or path to database file if using sqlite3. -database_name = -# Not used with sqlite3. -database_user = -# Not used with sqlite3. -database_password = -# Set to empty string for localhost. Not used with sqlite3. -database_host = -# Set to empty string for default. Not used with sqlite3. 
-database_port = 3306 -# enter an NIS group name you'd like to give access to edit hostbase records -##authorized_group = support -# default mx record for new hosts added to the database -default_mx = mailserver.yourdomain.net -priority = 30 diff -Nru bcfg2-1.3.5/examples/Bundler/dirvish.xml bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/Bundler/dirvish.xml --- bcfg2-1.3.5/examples/Bundler/dirvish.xml 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/Bundler/dirvish.xml 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,20 @@ + + + + client: nfs-host +tree: /export/homes/${user.text} +exclude: + *~ + .nfs* + + + + + + + + diff -Nru bcfg2-1.3.5/examples/Bundler/sgenshi-dirvish.genshi bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/Bundler/sgenshi-dirvish.genshi --- bcfg2-1.3.5/examples/Bundler/sgenshi-dirvish.genshi 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/Bundler/sgenshi-dirvish.genshi 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ - - - - -client: nfs-host -tree: /export/homes/${user.text} -exclude: - *~ - .nfs* - - - - - - - - diff -Nru bcfg2-1.3.5/examples/Cfg/etc/dirvish/master.conf/master.conf.genshi bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/Cfg/etc/dirvish/master.conf/master.conf.genshi --- bcfg2-1.3.5/examples/Cfg/etc/dirvish/master.conf/master.conf.genshi 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/Cfg/etc/dirvish/master.conf/master.conf.genshi 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,25 @@ +bank: + /backup + +image-default: %Y-%m-%d +log: bzip2 +index: bzip2 +xdev: 1 + +exclude: + lost+found/ + *~ + .nfs* + +Runall: +{% for user in metadata.Properties['dirvish.xml'].data.find('users') %}\ + homes/${user.tag} +{% end %}\ + +expire-default: +2 weeks + +expire-rule: +# MIN HR DOM MON DOW STRFTIME_FMT + * * * * 1 +6 weeks + * * 1-7 * 1 +6 months + * * 1-7 1,4,7,10 1 never diff -Nru bcfg2-1.3.5/examples/TemplateHelper/include.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TemplateHelper/include.py --- bcfg2-1.3.5/examples/TemplateHelper/include.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TemplateHelper/include.py 2017-01-10 19:18:17.000000000 +0000 @@ -5,14 +5,13 @@ {% python import os - include = metadata.TemplateHelper['include'] - custom = include.IncludeHelper(metadata, path).files(os.path.basename(name)) + custom = IncludeHelper(metadata, path).files(os.path.basename(name)) %}\ {% for file in custom %}\ - ########## Start ${include.describe_specificity(file)} ########## + ########## Start ${describe_specificity(file)} ########## {% include ${file} %} - ########## End ${include.describe_specificity(file)} ########## + ########## End ${describe_specificity(file)} ########## {% end %}\ This would let you include files with the same base name; e.g. in a @@ -20,7 +19,7 @@ ''foo.conf.G_.genshi_include''. 
If a template needs to include different files in different places, you can do that like so: - inc = metadata.TemplateHelper['include'].IncludeHelper(metadata, path) + inc = IncludeHelper(metadata, path) custom_bar = inc.files("bar") custom_baz = inc.files("baz") @@ -33,7 +32,7 @@ import os import re -__export__ = ["IncludeHelper", "get_specificity", "describe_specificity"] +__default__ = ["IncludeHelper", "get_specificity", "describe_specificity"] class IncludeHelper(object): diff -Nru bcfg2-1.3.5/examples/TGenshi/etc/dirvish/master.conf/template.newtxt bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/etc/dirvish/master.conf/template.newtxt --- bcfg2-1.3.5/examples/TGenshi/etc/dirvish/master.conf/template.newtxt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/etc/dirvish/master.conf/template.newtxt 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -bank: - /backup - -image-default: %Y-%m-%d -log: bzip2 -index: bzip2 -xdev: 1 - -exclude: - lost+found/ - *~ - .nfs* - -Runall: -{% for user in metadata.Properties['dirvish.xml'].data.find('users') %}\ - homes/${user.tag} -{% end %}\ - -expire-default: +2 weeks - -expire-rule: -# MIN HR DOM MON DOW STRFTIME_FMT - * * * * 1 +6 weeks - * * 1-7 * 1 +6 months - * * 1-7 1,4,7,10 1 never diff -Nru bcfg2-1.3.5/examples/TGenshi/etc/motd/template.newtxt bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/etc/motd/template.newtxt --- bcfg2-1.3.5/examples/TGenshi/etc/motd/template.newtxt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/etc/motd/template.newtxt 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ ------------------------------------------------------------------------- - GOALS FOR SERVER MANAGED BY BCFG2 ------------------------------------------------------------------------- -Hostname is ${metadata.hostname} - -Groups: -{% for group in metadata.groups %}\ - * ${group} -{% end %}\ - -{% if metadata.categories %}\ -Categories: -{% for category in metadata.categories %}\ - * ${category} -{% end %}\ -{% end %}\ - - -{% if metadata.Probes %}\ -Probes: -{% for probe, value in metadata.Probes.iteritems() %}\ - * ${probe} \ - ${value} -{% end %}\ -{% end %}\ - ------------------------------------------------------------------------- - ITOPS MOTD ------------------------------------------------------------------------- -Please create a Ticket for any system level changes you need from IT. 
- diff -Nru bcfg2-1.3.5/examples/TGenshi/tmp/bar/template.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/tmp/bar/template.txt --- bcfg2-1.3.5/examples/TGenshi/tmp/bar/template.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/tmp/bar/template.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -[communication] -protocol = xmlrpc/ssl -#if metadata.uuid != None -user = $metadata.uuid -#end -#choose -#when metadata.password is not None -password = $metadata.password -#end -#when metadata.password is None -password = GlobalPassword -#end -#end - -[client] -drivers = Action,Chkconfig,POSIX,YUMng - -[components] -bcfg2 = https://config.example.com:6789 diff -Nru bcfg2-1.3.5/examples/TGenshi/tmp/foo/template.xml bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/tmp/foo/template.xml --- bcfg2-1.3.5/examples/TGenshi/tmp/foo/template.xml 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/examples/TGenshi/tmp/foo/template.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ - - - ${name} - - - - - - - - - - - - - - - - - - - - - - - - -
- [removed example continues: an HTML table rendering ${name}, ${metadata.hostname}, ${metadata.uuid}, ${metadata.password}, plus loops over the client's bundles, groups, categories, and probes (${metadata.probes[probe]})]
- - diff -Nru bcfg2-1.3.5/man/bcfg2-admin.8 bcfg2-1.4.0~pre2+git141-g6d40dace6358/man/bcfg2-admin.8 --- bcfg2-1.3.5/man/bcfg2-admin.8 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/man/bcfg2-admin.8 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,4 @@ -.TH "BCFG2-ADMIN" "8" "March 18, 2013" "1.3" "Bcfg2" +.TH "BCFG2-ADMIN" "8" "April 06, 2014" "1.3" "Bcfg2" .SH NAME bcfg2-admin \- Perform repository administration tasks . @@ -78,10 +78,6 @@ .B backup Create an archive of the entire Bcfg2 repository. .TP -.B bundle \fIaction\fP -Display details about the available bundles (See BUNDLE OPTIONS -below). -.TP .B client \fIaction\fP \fIclient\fP [attribute=value] Add, edit, or remove clients entries in metadata (See CLIENT OPTIONS below). @@ -91,9 +87,15 @@ behavior between releases. Determine differences between files or directories (See COMPARE OPTIONS below). .TP +.B dbshell +Call the Django \(aqdbshell\(aq command on the configured database. +.TP .B init Initialize a new repository (interactive). .TP +.B initreports +Initialize the Reporting database. +.TP .B minestruct \fIclient\fP [\-f xml\-file] [\-g groups] Build structure entries based on client statistics extra entries (See MINESTRUCT OPTIONS below). @@ -105,12 +107,21 @@ Install configuration information into repo based on client bad entries (See PULL OPTIONS below). .TP -.B reports [init|load_stats|purge|scrub|update] -Interact with the dynamic reporting system (See REPORTS OPTIONS -below). +.B purgereports +Purge historic and expired data from the Reporting database +.TP +.B reportssqlall +Call the Django \(aqshell\(aq command on the Reporting database. .TP -.B snapshots [init|dump|query|reports] -Interact with the Snapshots database (See SNAPSHOTS OPTIONS below). +.B reportsstats +Print Reporting database statistics. +.TP +.B scrubreports +Scrub the Reporting database for duplicate reasons and orphaned +entries. +.TP +.B shell +Call the Django \(aqshell\(aq command on the configured database. .TP .B syncdb Sync the Django ORM with the configured database. @@ -118,6 +129,12 @@ .B tidy Remove unused files from repository. .TP +.B updatereports +Apply database schema updates to the Reporting database. +.TP +.B validatedb +Call the Django \(aqvalidate\(aq command on the configured database. +.TP .B viz [\-H] [\-b] [\-k] [\-o png\-file] Create a graphviz diagram of client, group and bundle information (See VIZ OPTIONS below). @@ -125,23 +142,6 @@ .B xcmd Provides a XML\-RPC Command Interface to the bcfg2\-server. .UNINDENT -.SS BUNDLE OPTIONS -.INDENT 0.0 -.TP -.B mode -One of the following. -.INDENT 7.0 -.TP -.B \fIlist\-xml\fP -List all available xml bundles -.TP -.B \fIlist\-genshi\fP -List all available genshi bundles -.TP -.B \fIshow\fP -Interactive dialog to get details about the available bundles -.UNINDENT -.UNINDENT .SS CLIENT OPTIONS .INDENT 0.0 .TP @@ -170,11 +170,24 @@ .SS COMPARE OPTIONS .INDENT 0.0 .TP +.B \-d \fIN\fP, \-\-diff\-lines \fIN\fP +Show only N lines of a diff +.UNINDENT +.INDENT 0.0 +.TP +.B \-c, \-\-color +Show colors even if not ryn from a TTY +.TP +.B \-q, \-\-quiet +Only show that entries differ, not how they differ +.UNINDENT +.INDENT 0.0 +.TP .B old -Specify the location of the old configuration file. +Specify the location of the old configuration(s). .TP .B new -Specify the location of the new configuration file. +Specify the location of the new configuration(s). 
.UNINDENT .SS MINESTRUCT OPTIONS .INDENT 0.0 @@ -200,51 +213,30 @@ .B entry name Specify the name of the entry to pull. .UNINDENT -.SS REPORTS OPTIONS -.INDENT 0.0 -.TP -.B load_stats [\-s] [\-c] [\-03] -Load statistics data. -.TP -.B purge [\-\-client [n]] [\-\-days [n]] [\-\-expired] -Purge historic and expired data. -.TP -.B scrub -Scrub the database for duplicate reasons and orphaned entries. -.TP -.B update -Apply any updates to the reporting database. -.UNINDENT -.SS SNAPSHOTS OPTIONS -.INDENT 0.0 -.TP -.B init -Initialize the snapshots database. -.TP -.B query -Query the snapshots database. -.TP -.B dump -Dump some of the contents of the snapshots database. -.TP -.B reports [\-a] [\-b] [\-e] [\-\-date=MM\-DD\-YYYY] -Generate reports for clients in the snapshots database. -.UNINDENT .SS VIZ OPTIONS .INDENT 0.0 .TP -.B \-H +.B \-H, \-\-includehosts Include hosts in diagram. .TP -.B \-b +.B \-b, \-\-includebundles Include bundles in diagram. +.UNINDENT +.INDENT 0.0 .TP -.BI \-o \ +.B \-o \fIoutfile\fP, \-\-outfile \fIoutfile\fP Write to outfile file instead of stdout. +.UNINDENT +.INDENT 0.0 .TP -.B \-k +.B \-k, \-\-includekey Add a shape/color key. .UNINDENT +.INDENT 0.0 +.TP +.B \-c \fIhostname\fP, \-\-only\-client \fIhostname\fP +Only show groups and bundles for the named client +.UNINDENT .SH SEE ALSO .sp \fIbcfg2\-info(8)\fP, \fIbcfg2\-server(8)\fP diff -Nru bcfg2-1.3.5/man/bcfg2.conf.5 bcfg2-1.4.0~pre2+git141-g6d40dace6358/man/bcfg2.conf.5 --- bcfg2-1.3.5/man/bcfg2.conf.5 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/man/bcfg2.conf.5 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,6 @@ -.TH "BCFG2.CONF" "5" "July 19, 2013" "1.3" "Bcfg2" +.\" Man page generated from reStructuredText. +. +.TH "BCFG2.CONF" "5" "November 04, 2014" "1.4" "Bcfg2" .SH NAME bcfg2.conf \- Configuration parameters for Bcfg2 . @@ -28,8 +30,6 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.\" Man page generated from reStructuredText. -. .SH DESCRIPTION .sp bcfg2.conf includes configuration parameters for the Bcfg2 server and @@ -69,7 +69,6 @@ .ft C inotify gamin -fam pseudo .ft P .fi @@ -77,11 +76,10 @@ .UNINDENT .TP .B fam_blocking -. Whether the server should block at startup until the file monitor backend has processed all events. This can cause a slower startup, but ensure that all files are recognized before the first client -is handled. +is handled. Defaults to True. .TP .B ignore_files A comma\-separated list of globs that should be ignored by the file @@ -109,7 +107,7 @@ .B listen_all This setting tells the server to listen on all available interfaces. The default is to only listen on those interfaces specified by the -bcfg2 setting in the components section of \fBbcfg2.conf\fP. +bcfg2 setting in the components section of \fBbcfg2.conf\fP\&. .TP .B plugins A comma\-delimited list of enabled server plugins. Currently @@ -119,24 +117,22 @@ .sp .nf .ft C -Account -Base +ACL Bundler Bzr Cfg Cvs Darcs -DBStats Decisions +Defaults Deps -Editor FileProbes Fossil Git +GroupLogic GroupPatterns Guppy Hg -Hostbase Ldap Metadata NagiosGen @@ -151,14 +147,9 @@ Rules SEModules ServiceCompat -Snapshots SSHbase -SSLCA -Statistics Svn -TCheetah TemplateHelper -TGenshi Trigger .ft P .fi @@ -188,51 +179,32 @@ .UNINDENT .UNINDENT .sp -The default is \fIbest\fP, which is currently an alias for \fIbuiltin\fP. +The default is \fIbest\fP, which is currently an alias for \fIbuiltin\fP\&. 
More details on the backends can be found in the official documentation. .TP .B user -The username or UID to run the daemon as. Default is \fI0\fP. +The username or UID to run the daemon as. Default is \fI0\fP\&. .TP .B group -The group name or GID to run the daemon as. Default is \fI0\fP. +The group name or GID to run the daemon as. Default is \fI0\fP\&. .TP .B vcs_root Specifies the path to the root of the VCS working copy that holds -your Bcfg2 specification, if it is different from \fIrepository\fP. +your Bcfg2 specification, if it is different from \fIrepository\fP\&. E.g., if the VCS repository does not hold the bcfg2 data at the top level, you may need to set this option. .TP .B umask -The umask to set for the server. Default is \fI0077\fP. +The umask to set for the server. Default is \fI0077\fP\&. .UNINDENT .SH SERVER PLUGINS .sp This section has a listing of all the plugins currently provided with Bcfg2. -.SS Account Plugin -.sp -The account plugin manages authentication data, including the following. -.INDENT 0.0 -.IP \(bu 2 -\fB/etc/passwd\fP -.IP \(bu 2 -\fB/etc/group\fP -.IP \(bu 2 -\fB/etc/security/limits.conf\fP -.IP \(bu 2 -\fB/etc/sudoers\fP -.IP \(bu 2 -\fB/root/.ssh/authorized_keys\fP -.UNINDENT -.SS Base Plugin +.SS ACL Plugin .sp -The Base plugin is a structure plugin that provides the ability -to add lists of unrelated entries into client configuration entry -inventories. Base works much like Bundler in its file format. This -structure plugin is good for the pile of independent configs needed for -most actual systems. +The ACL plugin controls which hosts can make which XML\-RPC calls. .SS Bundler Plugin .sp The Bundler plugin is used to describe groups of inter\-dependent @@ -252,19 +224,16 @@ contents for clients. In its simplest form, the Cfg repository is just a directory tree modeled off of the directory tree on your client machines. -.SS Cvs Plugin (experimental) +.SS Cvs Plugin .sp The Cvs plugin allows you to track changes to your Bcfg2 repository using a Concurrent version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -.SS Darcs Plugin (experimental) +.SS Darcs Plugin .sp The Darcs plugin allows you to track changes to your Bcfg2 repository using a Darcs version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -.SS DBStats Plugin -.sp -Direct to database statistics plugin. .SS Decisions Plugin .sp The Decisions plugin has support for a centralized set of per\-entry @@ -282,11 +251,6 @@ .sp The Deps plugin allows you to make a series of assertions like "Package X requires Package Y (and optionally also Package Z etc.)" -.SS Editor Plugin -.sp -The Editor plugin attempts to allow you to partially manage -configuration for a file. Its use is not recommended and not well -documented. .SS FileProbes Plugin .sp The FileProbes plugin allows you to probe a client for a file, which is @@ -303,6 +267,10 @@ The Git plugin allows you to track changes to your Bcfg2 repository using a Git version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. +.SS GroupLogic Plugin +.sp +The GroupLogic plugin lets you flexibly assign group membership with a +Genshi template. 
.SS GroupPatterns Plugin .sp The GroupPatterns plugin is a connector that can assign clients group @@ -311,17 +279,14 @@ .sp The Guppy plugin is used to trace memory leaks within the bcfg2\-server process using Guppy. -.SS Hg Plugin (experimental) +.SS Hg Plugin .sp The Hg plugin allows you to track changes to your Bcfg2 repository using a Mercurial version control backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -.SS Hostbase Plugin -.sp -The Hostbase plugin is an IP management system built on top of Bcfg2. .SS Ldap Plugin .sp -The Ldap plugin makes it possible to fetch data from an LDAP directory, +The Ldap plugin makes it possible to fetch data from a LDAP directory, process it and attach it to your metadata. .SS Metadata Plugin .sp @@ -331,7 +296,7 @@ .sp The NagiosGen plugin dynamically generates Nagios configuration files based on Bcfg2 data. -.SS Ohai Plugin (experimental) +.SS Ohai Plugin .sp The Ohai plugin is used to detect information about the client operating system. The data is reported back to the server using JSON. @@ -371,10 +336,10 @@ dynamic reporting system. .SS Rules Plugin .sp -The Rules plugin provides literal configuration entries that resolve the -abstract configuration entries normally found in the Bundler and Base -plugins. The literal entries in Rules are suitable for consumption by -the appropriate client drivers. +The Rules plugin provides literal configuration entries that resolve +the abstract configuration entries normally found in Bundler. The +literal entries in Rules are suitable for consumption by the +appropriate client drivers. .SS SEModules Plugin .sp The SEModules plugin provides a way to distribute SELinux modules via @@ -382,37 +347,16 @@ .SS ServiceCompat Plugin .sp The ServiceCompat plugin converts service entries for older clients. -.SS Snapshots Plugin -.sp -The Snapshots plugin stores various aspects of a client’s state when the -client checks in to the server. .SS SSHbase Plugin .sp The SSHbase generator plugin manages ssh host keys (both v1 and v2) for hosts. It also manages the ssh_known_hosts file. It can integrate host keys from other management domains and similarly export its keys. -.SS SSLCA Plugin -.sp -The SSLCA plugin is designed to handle creation of SSL privatekeys and -certificates on request. -.SS Statistics -.sp -The Statistics plugin is deprecated (see Reporting). .SS Svn Plugin .sp The Svn plugin allows you to track changes to your Bcfg2 repository using a Subversion backend. Currently, it enables you to get revision information out of your repository for reporting purposes. -.SS TCheetah Plugin -.sp -The TCheetah plugin allows you to use the cheetah templating system to -create files. It also allows you to include the results of probes -executed on the client in the created files. -.SS TGenshi Plugin -.sp -The TGenshi plugin allows you to use the Genshi templating system to -create files. It also allows you to include the results of probes -executed on the client in the created files. .SS Trigger Plugin .sp The Trigger plugin provides a method for calling external scripts when @@ -504,7 +448,7 @@ sets the password to use to connect to the server. .TP .B protocol -Communication protocol to use. Defaults to xmlrpc/ssl. +Communication protocol to use. Defaults to xmlrpc/tlsv1. .TP .B retries A client\-only option. 
Number of times to retry network @@ -610,6 +554,10 @@ .INDENT 3.5 .INDENT 0.0 .TP +.B backends +Comma separated list of backends for the dependency resolution. +Default is "Yum,Apt,Pac,Pkgng". +.TP .B resolver Enable dependency resolution. Default is 1 (true). .TP @@ -625,7 +573,7 @@ .TP .B gpg_keypath The path on the client where RPM GPG keys will be copied before -they are imported on the client. Default is \fB/etc/pki/rpm\-gpg\fP. +they are imported on the client. Default is \fB/etc/pki/rpm\-gpg\fP\&. .TP .B version Set the version attribute used when binding Packages. Default is @@ -684,7 +632,7 @@ .TP .B path Custom path for backups created in paranoid mode. The default is -in \fB/var/cache/bcfg2\fP. +in \fB/var/cache/bcfg2\fP\&. .TP .B max_copies Specify a maximum number of copies for the server to keep when @@ -693,28 +641,11 @@ .UNINDENT .UNINDENT .UNINDENT -.SH SNAPSHOTS OPTIONS +.SH SSL CA OPTIONS .sp -Specified in the \fB[snapshots]\fP section. These options control the -server snapshots functionality. -.INDENT 0.0 -.INDENT 3.5 -.INDENT 0.0 -.TP -.B driver -sqlite -.TP -.B database -The name of the database to use for statistics data. -.sp -e.g.: \fB$REPOSITORY_DIR/etc/bcfg2.sqlite\fP -.UNINDENT -.UNINDENT -.UNINDENT -.SH SSLCA OPTIONS -.sp -These options are necessary to configure the SSLCA plugin and can be -found in the \fB[sslca_default]\fP section of the configuration file. +These options are necessary to configure the SSL CA feature of the Cfg +plugin and can be found in the \fB[sslca_default]\fP section of the +configuration file. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 @@ -746,7 +677,7 @@ .INDENT 0.0 .TP .B engine -The database engine used by the statistics module. One of the +The database engine used by server plugins. One of the following: .INDENT 7.0 .INDENT 3.5 @@ -763,9 +694,9 @@ .UNINDENT .TP .B name -The name of the database to use for statistics data. If +The name of the database to use for server data. If \(aqdatabase_engine\(aq is set to \(aqsqlite3\(aq this is a file path to -the sqlite file and defaults to \fB$REPOSITORY_DIR/etc/brpt.sqlite\fP. +the sqlite file and defaults to \fB$REPOSITORY_DIR/etc/bcfg2.sqlite\fP\&. .TP .B user User for database connections. Not used for sqlite3. @@ -780,9 +711,50 @@ Port for database connections. Not used for sqlite3. .TP .B options -Various options for the database connection. The value is -expected as multiple key=value pairs, separated with commas. -The concrete value depends on the database engine. +Various options for the database connection. The value expected +is the literal value of the django OPTIONS setting. +.TP +.B reporting_engine +The database engine used by the Reporting plugin. One of the +following: +.INDENT 7.0 +.INDENT 3.5 +.sp +.nf +.ft C + postgresql + mysql + sqlite3 + ado_mssql + +If reporting_engine is not specified, the Reporting plugin uses +the same database as the other server plugins. +.ft P +.fi +.UNINDENT +.UNINDENT +.TP +.B reporting_name +The name of the database to use for reporting data. If +\(aqdatabase_engine\(aq is set to \(aqsqlite3\(aq this is a file path to +the sqlite file and defaults to +\fB$REPOSITORY_DIR/etc/reporting.sqlite\fP. +.TP +.B reporting_user +User for reporting database connections. Not used for sqlite3. +.TP +.B reporting_password +Password for reporting database connections. Not used for sqlite3. +.TP +.B reporting_host +Host for reporting database connections. Not used for sqlite3. +.TP +.B reporting_port +Port for reporting database connections. Not used for sqlite3. 
+.TP +.B reporting_options +Various options for the reporting database connection. The value +expected is the literal value of the django OPTIONS setting. .UNINDENT .UNINDENT .UNINDENT @@ -802,6 +774,15 @@ .TP .B web_debug Turn on Django debugging. +.TP +.B max_children +Maximum number of children for the reporting collector. Use 0 to +disable the limit. (default is 0) +.TP +.B django_settings +Arbitrary options for the Django installation. The value expected +is a literal python dictionary, that is merged with the already set +django settings. .UNINDENT .UNINDENT .UNINDENT diff -Nru bcfg2-1.3.5/man/bcfg2-server.8 bcfg2-1.4.0~pre2+git141-g6d40dace6358/man/bcfg2-server.8 --- bcfg2-1.3.5/man/bcfg2-server.8 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/man/bcfg2-server.8 2017-01-10 19:18:17.000000000 +0000 @@ -34,7 +34,7 @@ .sp \fBbcfg2\-server\fP [\-d] [\-v] [\-C \fIconfigfile\fP] [\-D \fIpidfile\fP] [\-E \fIencoding\fP] [\-Q \fIrepo path\fP] [\-S \fIserver url\fP] [\-o \fIlogfile\fP] [\-x -\fIpassword\fP] [\-\-ssl\-key=\fIssl key\fP] +\fIpassword\fP] [\-\-ssl\-key=\fIssl key\fP] [\-\-no\-fam\-blocking] .SH DESCRIPTION .sp \fBbcfg2\-server\fP is the daemon component of Bcfg2 which serves @@ -70,9 +70,12 @@ .TP .BI \-\-ssl\-key\fB= key Specify the path to the SSL key. +.TP +.BI \-\-no\-fam\-blocking +Synonym for fam_blocking = False in bcfg2.conf .UNINDENT .SH SEE ALSO .sp -\fIbcfg2(1)\fP, \fIbcfg2\-lint(8)\fP +\fIbcfg2(1)\fP, \fIbcfg2\-lint(8)\fP, \fIbcfg2.conf(5)\fP .\" Generated by docutils manpage writer. . diff -Nru bcfg2-1.3.5/misc/bcfg2-selinux.spec bcfg2-1.4.0~pre2+git141-g6d40dace6358/misc/bcfg2-selinux.spec --- bcfg2-1.3.5/misc/bcfg2-selinux.spec 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/misc/bcfg2-selinux.spec 2017-01-10 19:18:17.000000000 +0000 @@ -12,11 +12,11 @@ # # Don't forget to change the Release: tag below to something like 0.1 #%%global _rc 1 -#%%global _pre 2 +%global _pre pre2 %global _pre_rc %{?_pre:.pre%{_pre}}%{?_rc:.rc%{_rc}} Name: bcfg2-selinux -Version: 1.3.5 +Version: 1.4.0 Release: 1%{?_pre_rc}%{?dist} Summary: Bcfg2 Client and Server SELinux policy @@ -33,7 +33,7 @@ License: BSD URL: http://bcfg2.org Source0: ftp://ftp.mcs.anl.gov/pub/bcfg/%{name}-%{version}%{?_pre_rc}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildRoot: %{_tmppath}/%{name}-%{version}%{?_pre_rc}-%{release}-root-%(%{__id_u} -n) BuildArch: noarch BuildRequires: checkpolicy, selinux-policy-devel, hardlink diff -Nru bcfg2-1.3.5/misc/bcfg2.spec bcfg2-1.4.0~pre2+git141-g6d40dace6358/misc/bcfg2.spec --- bcfg2-1.3.5/misc/bcfg2.spec 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/misc/bcfg2.spec 2017-01-10 19:18:17.000000000 +0000 @@ -18,9 +18,11 @@ # characters from the appropriate line below. 
# # Don't forget to change the Release: tag below to something like 0.1 -#%%global _rc 1 -#%%global _pre 2 -%global _pre_rc %{?_pre:.pre%{_pre}}%{?_rc:.rc%{_rc}} +#%%global _rc rc1 +%global _pre pre2 +%global _nightly 1 +%global _date %(date +%Y%m%d) +%global _pre_rc %{?_pre:%{_pre}}%{?_rc:%{_rc}} # cherrypy 3.3 actually doesn't exist yet, but 3.2 has bugs that # prevent it from working: @@ -29,8 +31,8 @@ Name: bcfg2 -Version: 1.3.5 -Release: 1%{?_pre_rc}%{?dist} +Version: 1.4.0 +Release: 0.1.%{?_nightly:nightly.%{_date}}%{?_pre_rc}%{?dist} Summary: A configuration management system %if 0%{?suse_version} @@ -46,7 +48,7 @@ Source1: http://www.w3.org/2001/XMLSchema.xsd %if %{?rhel}%{!?rhel:10} <= 5 || 0%{?suse_version} # EL5 and OpenSUSE require the BuildRoot tag -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildRoot: %{_tmppath}/%{name}-%{version}%{?_pre_rc}-%{release}-root-%(%{__id_u} -n) %endif BuildArch: noarch @@ -54,6 +56,8 @@ BuildRequires: python-devel BuildRequires: python-lxml BuildRequires: python-boto +BuildRequires: python-argparse +BuildRequires: python-jinja2 %if 0%{?suse_version} BuildRequires: python-M2Crypto BuildRequires: python-Genshi @@ -76,15 +80,14 @@ BuildRequires: python-ssl %else # rhel > 5 # EL5 lacks python-mock, so test suite is disabled -BuildRequires: python-sqlalchemy BuildRequires: python-nose BuildRequires: mock BuildRequires: m2crypto # EPEL uses the properly-named python-django starting with EPEL7 %if 0%{?rhel} && 0%{?rhel} > 6 -BuildRequires: python-django +BuildRequires: python-django >= 1.3 %else -BuildRequires: Django +BuildRequires: Django >= 1.3 %endif BuildRequires: python-genshi BuildRequires: python-cheetah @@ -136,6 +139,8 @@ Requires: python-ssl %endif Requires: libselinux-python +Requires: pylibacl +Requires: python-argparse %if 0%{?fedora} >= 16 Requires(post): systemd-units @@ -191,6 +196,7 @@ %endif Requires: bcfg2 = %{version}-%{release} Requires: python-lxml >= 1.2.1 +Requires: python-genshi %if 0%{?suse_version} Requires: python-pyinotify Requires: python-python-daemon @@ -257,10 +263,8 @@ Requires: bcfg2 = %{version}-%{release} Requires: bcfg2-server = %{version}-%{release} -# cherrypy 3.3 actually doesn't exist yet, but 3.2 has bugs that -# prevent it from working: -# https://bitbucket.org/cherrypy/cherrypy/issue/1154/assertionerror-in-recv-when-ssl-is-enabled -Requires: python-cherrypy > 3.3 +# https://bitbucket.org/cherrypy/cherrypy/issue/1068/file-upload-crashes-when-using-https +Requires: python-cherrypy >= 3.2.6 %description server-cherrypy Bcfg2 helps system administrators produce a consistent, reproducible, @@ -295,22 +299,23 @@ %package web Summary: Bcfg2 Web Reporting Interface +Requires: bcfg2-server = %{version}-%{release} +Requires: httpd %if 0%{?suse_version} Group: System/Management -Requires: python-django >= 1.2 +Requires: python-django >= 1.3 Requires: python-django-south >= 0.7 %else Group: System Tools # EPEL uses the properly-named python-django starting with EPEL7 %if 0%{?rhel} && 0%{?rhel} > 6 -Requires: python-django +Requires: python-django > 1.3 %else -Requires: Django >= 1.2 +Requires: Django >= 1.3 Requires: Django-south >= 0.7 %endif Requires: bcfg2-server %endif -Requires: httpd %if "%{_vendor}" == "redhat" Requires: mod_wsgi %global apache_conf %{_sysconfdir}/httpd @@ -443,7 +448,7 @@ # Get rid of extraneous shebangs for f in `find src/lib -name \*.py` do - sed -i -e '/^#!/,1d' $f + %{__sed} -i -e '/^#!/,1d' $f done sed -i "s/apache2/httpd/g" misc/apache/bcfg2.conf @@ -559,7 
+564,7 @@ %if 0%{?suse_version} %fillup_and_insserv -f bcfg2 %else - /sbin/chkconfig --add bcfg2 + /sbin/chkconfig --add bcfg2 %endif fi %endif @@ -573,7 +578,7 @@ %if 0%{?suse_version} %fillup_and_insserv -f bcfg2-server %else - /sbin/chkconfig --add bcfg2-server + /sbin/chkconfig --add bcfg2-server %endif fi %endif @@ -688,8 +693,7 @@ %{python_sitelib}/Bcfg2/Client %{python_sitelib}/Bcfg2/Compat.py* %{python_sitelib}/Bcfg2/Logger.py* -%{python_sitelib}/Bcfg2/Options.py* -%{python_sitelib}/Bcfg2/Proxy.py* +%{python_sitelib}/Bcfg2/Options %{python_sitelib}/Bcfg2/Utils.py* %{python_sitelib}/Bcfg2/version.py* %if 0%{?suse_version} @@ -711,20 +715,17 @@ %config(noreplace) %{_sysconfdir}/sysconfig/bcfg2-server %{_sbindir}/bcfg2-* %dir %{_localstatedir}/lib/%{name} -%{python_sitelib}/Bcfg2/Cache.py* -%{python_sitelib}/Bcfg2/Encryption.py* -%{python_sitelib}/Bcfg2/SSLServer.py* -%{python_sitelib}/Bcfg2/Statistics.py* -%{python_sitelib}/Bcfg2/settings.py* +%{python_sitelib}/Bcfg2/DBSettings.py* %{python_sitelib}/Bcfg2/Server %{python_sitelib}/Bcfg2/Reporting %{python_sitelib}/Bcfg2/manage.py* +%if %{build_cherry_py} %exclude %{python_sitelib}/Bcfg2/Server/CherryPyCore.py* +%endif %dir %{_datadir}/bcfg2 %{_datadir}/bcfg2/schemas %{_datadir}/bcfg2/xsl-transforms -%{_datadir}/bcfg2/Hostbase %if 0%{?suse_version} %{_sbindir}/rcbcfg2-server %config(noreplace) /var/adm/fillup-templates/sysconfig.bcfg2-server diff -Nru bcfg2-1.3.5/osx/bcfg2.conf bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/bcfg2.conf --- bcfg2-1.3.5/osx/bcfg2.conf 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/bcfg2.conf 2017-01-10 19:18:17.000000000 +0000 @@ -1,5 +1,4 @@ [communication] -protocol = xmlrpc/ssl password = foobat # certificate = /etc/bcfg2.key # key = /etc/bcfg2.key diff -Nru bcfg2-1.3.5/osx/Introduction.txt bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/Introduction.txt --- bcfg2-1.3.5/osx/Introduction.txt 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/Introduction.txt 2017-01-10 19:18:17.000000000 +0000 @@ -11,6 +11,6 @@ Bcfg2 is fairly portable. It has been successfully run on: AIX, FreeBSD, OpenBSD, Mac OS X, OpenSolaris, Solaris - Many GNU/Linux distributions, including ArchLinux Blag, CentOS, Debian, Fedora, Gentoo, gNewSense, Mandriva, openSUSE, Red Hat/RHEL, SuSE/SLES, Trisquel, and Ubuntu. + Many GNU/Linux distributions, including Arch Linux, Blag, CentOS, Debian, Fedora, Gentoo, gNewSense, Mandriva, openSUSE, Red Hat/RHEL, SuSE/SLES, Trisquel, and Ubuntu. Bcfg2 should run on any POSIX compatible operating system, however direct support for an operating system's package and service formats are limited by the currently available client tools (new client tools are pretty easy to add). There is also an incomplete but more exact list of platforms on which Bcfg2 works. 
diff -Nru bcfg2-1.3.5/osx/macports/files/patch-setup.py.diff bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/macports/files/patch-setup.py.diff --- bcfg2-1.3.5/osx/macports/files/patch-setup.py.diff 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/macports/files/patch-setup.py.diff 2017-01-10 19:18:17.000000000 +0000 @@ -1,6 +1,6 @@ --- setup.py 2010-11-15 15:30:28.000000000 -0600 +++ setup.py.macports 2010-11-18 19:06:49.155292524 -0600 -@@ -11,47 +11,22 @@ +@@ -11,38 +11,21 @@ setup(cmdclass=cmdclass, name="Bcfg2", version="1.1.1", @@ -14,16 +14,12 @@ "Bcfg2.Client.Tools", - 'Bcfg2.Server', - "Bcfg2.Server.Admin", -- "Bcfg2.Server.Hostbase", -- "Bcfg2.Server.Hostbase.hostbase", - "Bcfg2.Server.Plugins", - "Bcfg2.Server.Reports", - "Bcfg2.Server.Reports.reports", - "Bcfg2.Server.Reports.reports.templatetags", -- "Bcfg2.Server.Snapshots", ], + py_modules = ["Bcfg2.Options", -+ "Bcfg2.Proxy", + "Bcfg2.Logger", + ], package_dir = {'Bcfg2':'src/lib'}, @@ -51,11 +47,5 @@ - glob('src/lib/Server/Reports/reports/templates/clients/*')), - ('share/bcfg2/Reports/templates/config_items', - glob('src/lib/Server/Reports/reports/templates/config_items/*')), -- ('share/bcfg2/Hostbase/templates', -- glob('src/lib/Server/Hostbase/hostbase/webtemplates/*.*')), -- ('share/bcfg2/Hostbase/templates/hostbase', -- glob('src/lib/Server/Hostbase/hostbase/webtemplates/hostbase/*')), -- ('share/bcfg2/Hostbase/repo', -- glob('src/lib/Server/Hostbase/templates/*')), ] ) diff -Nru bcfg2-1.3.5/osx/macports/Portfile bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/macports/Portfile --- bcfg2-1.3.5/osx/macports/Portfile 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/macports/Portfile 2017-01-10 19:18:17.000000000 +0000 @@ -5,7 +5,7 @@ PortGroup python26 1.0 name bcfg2 -version 1.3.5 +version 1.4.0pre2 categories sysutils python maintainers gmail.com:sol.jerome license BSD diff -Nru bcfg2-1.3.5/osx/Makefile bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/Makefile --- bcfg2-1.3.5/osx/Makefile 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/osx/Makefile 2017-01-10 19:18:17.000000000 +0000 @@ -5,8 +5,7 @@ POSTFLIGHT = postflight PKGROOT = bcfg2pkg PKGTMP = bcfg2tmp -FILTERS = --filter Hostbase \ ---filter Reports \ +FILTERS = --filter Reports \ --filter Server \ --filter xsd \ --filter xsl \ @@ -29,9 +28,9 @@ # an Info.plist file for packagemaker to look at for package creation # and substitute the version strings. Major/Minor versions can only be # integers (e.g. "1" and "00" for bcfg2 version 1.0.0. -BCFGVER = 1.3.5 +BCFGVER = 1.4.0pre2 MAJOR = 1 -MINOR = 35 +MINOR = 40 default: clean client diff -Nru bcfg2-1.3.5/README bcfg2-1.4.0~pre2+git141-g6d40dace6358/README --- bcfg2-1.3.5/README 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/README 2017-01-10 19:18:17.000000000 +0000 @@ -13,7 +13,7 @@ Bcfg2 is fairly portable. It has been successfully run on: * AIX, FreeBSD, OpenBSD Mac OS X, OpenSolaris, Solaris -* Many GNU/Linux distributions, including ArchLinux, Blag, CentOS, +* Many GNU/Linux distributions, including Arch Linux, Blag, CentOS, Debian, Fedora, Gentoo, gNewSense, Mandriva, openSUSE, Red Hat/RHEL, SuSE/SLES, Trisquel, and Ubuntu. 
diff -Nru bcfg2-1.3.5/redhat/scripts/bcfg2-server.init bcfg2-1.4.0~pre2+git141-g6d40dace6358/redhat/scripts/bcfg2-server.init --- bcfg2-1.3.5/redhat/scripts/bcfg2-server.init 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/redhat/scripts/bcfg2-server.init 2017-01-10 19:18:17.000000000 +0000 @@ -27,7 +27,7 @@ test -f /etc/sysconfig/$prog && . /etc/sysconfig/$prog if [ "$BCFG2_SERVER_ENABLED" -eq 0 ] ; then - failure $"bcfg2-server is disabled - see /etc/sysconfig/bcfg2-server" + failure $"bcfg2-server is disabled - see /etc/sysconfig/$prog" echo exit 0 fi diff -Nru bcfg2-1.3.5/reports/reports.wsgi bcfg2-1.4.0~pre2+git141-g6d40dace6358/reports/reports.wsgi --- bcfg2-1.3.5/reports/reports.wsgi 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/reports/reports.wsgi 2017-01-10 19:18:17.000000000 +0000 @@ -1,9 +1,23 @@ import os -import Bcfg2.settings -os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings' -import django.core.handlers.wsgi +import Bcfg2.Options +import Bcfg2.DBSettings + +config_parsed = False def application(environ, start_response): - if 'BCFG2_CONFIG_FILE' in environ: - Bcfg2.settings.read_config(cfile=environ['BCFG2_CONFIG_FILE']) - return django.core.handlers.wsgi.WSGIHandler()(environ, start_response) + global config_parsed + + # with wsgi, the environment isn't present in os.environ, but + # is passwd to the application function + if 'BCFG2_CONFIG_FILE' in environ: + os.environ['BCFG2_CONFIG_FILE'] = environ['BCFG2_CONFIG_FILE'] + if not config_parsed: + Bcfg2.Options.get_parser().parse() + config_parsed = True + + try: + from django.core.wsgi import get_wsgi_application + return get_wsgi_application()(environ, start_response) + except ImportError: + import django.core.handlers.wsgi + return django.core.handlers.wsgi.WSGIHandler()(environ, start_response) diff -Nru bcfg2-1.3.5/reports/site_media/bcfg2_base.css bcfg2-1.4.0~pre2+git141-g6d40dace6358/reports/site_media/bcfg2_base.css --- bcfg2-1.3.5/reports/site_media/bcfg2_base.css 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/reports/site_media/bcfg2_base.css 2017-01-10 19:18:17.000000000 +0000 @@ -159,6 +159,9 @@ .modified-lineitem { background: #FFEC8B; } +.stale-lineitem { + background: #CCCCCC; +} table.grid-view { border: solid 1px #98DBCC; diff -Nru bcfg2-1.3.5/schemas/acl-ip.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/acl-ip.xsd --- bcfg2-1.3.5/schemas/acl-ip.xsd 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/acl-ip.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,56 @@ + + + + Schema for IP-based client ACLs: + :ref:`server-plugins-misc-acl` ``ip.xml`` + + + + + + + + The name of the XML-RPC method to allow or deny. Limited + wildcards are supported. + + + + + + + The IP address to match against. This is an exact match + unless :xml:attribute:`IPACLType:netmask` is defined. If + this is not defined, all addresses match the given rule. + + + + + + + If this is defined, then it is combined with + :xml:attribute:`IPACLType:address` to produce a CIDR range, + which is used for matching instead of exact matching based + only on IP address. This can be either an integer netmask + (e.g., ``netmask="24"``) or a dotted-quad (e.g., + ``netmask="255.255.255.0"``). + + + + + + + + + Top-level tag for describing metadata-based client ACLs. 
+ + + + + + + + + + + + diff -Nru bcfg2-1.3.5/schemas/acl-metadata.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/acl-metadata.xsd --- bcfg2-1.3.5/schemas/acl-metadata.xsd 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/acl-metadata.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,87 @@ + + + + Schema for metadata-based client ACLs: + :ref:`server-plugins-misc-acl` ``metadata.xml`` + + + + + + + + + An **MetadataACLGroupType** is a tag used to provide logic. + Child entries of a MetadataACLGroupType tag only apply to + machines that match the condition specified -- either + membership in a group, or a matching client name. + :xml:attribute:`MetadataACLGroupType:negate` can be set to + negate the sense of the match. + + + + + + + The name of the client or group to match on. Child entries + will only apply to this client or group (unless + :xml:attribute:`MetadataACLGroupType:negate` is set). + + + + + + + Negate the sense of the match, so that child entries only + apply to a client if it is not a member of the given group + or does not have the given name. + + + + + + + + + + + The name of the XML-RPC method to allow or deny. Limited + wildcards are supported. + + + + + + + + + + Top-level tag for describing metadata-based client ACLs. + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + + + + + + + + + + + + + + + + diff -Nru bcfg2-1.3.5/schemas/acl.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/acl.xsd --- bcfg2-1.3.5/schemas/acl.xsd 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/acl.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + acl config schema for bcfg2 + Matt Schwager + + + + + + + + + + + + + diff -Nru bcfg2-1.3.5/schemas/authorizedkeys.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/authorizedkeys.xsd --- bcfg2-1.3.5/schemas/authorizedkeys.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/authorizedkeys.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ - + Schema for :ref:`server-plugins-generators-cfg-sshkeys` @@ -6,6 +7,9 @@ + + @@ -18,9 +22,11 @@ + + @@ -40,6 +46,7 @@ + @@ -57,7 +64,6 @@ - @@ -87,10 +93,10 @@ + - @@ -124,6 +130,7 @@ + @@ -150,29 +157,29 @@ - + - **Deprecated** way to specify options for public key - authentication and connection. See :manpage:`sshd(8)` for - details on allowable parameters. + Top-level tag for describing a generated SSH key pair. - + + + + + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + + - - - - Top-level tag for describing a generated SSH key pair. - - - - - - - - - - + diff -Nru bcfg2-1.3.5/schemas/bundle.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/bundle.xsd --- bcfg2-1.3.5/schemas/bundle.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/bundle.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -35,10 +35,7 @@ Abstract implementation of a Path entry. The entry will - either be handled by Cfg, TGenshi, or another - Generator plugin; or handled by Rules, in which case - the full specification of this entry will be included in - Rules. + be handled by a Generator plugin, like Cfg or Rules. @@ -72,15 +69,6 @@ - - - - PostInstall entries are deprecated in favor of Action - entries. Actions can do everything PostInstall entries can - do and more. - - - @@ -275,6 +263,13 @@ + + + + Nesting Bundle tags to specify dependencies to other bundles. 
+ + + @@ -312,6 +307,28 @@ + + + + + The name of the required bundle. + + + + + + + Specify how to handle modifications in the required + bundle. You can either ignore the modifications (this + is the default) or you can inherit the modifications + so that Services in the current Bundle are restarted + if the required Bundle is modified. + + + + + + @@ -323,11 +340,22 @@ - + - The name of the bundle. This must match the bundle - filename, minus the extension. + If set to ``true``, indicates that the bundle is a + collection of independent entries, and that service restarts + and modified actions should not be performed. See + :ref:`server-plugins-structures-bundler-magic` for more. + + + + + + + **Deprecated.** The name of the bundle. If present, this + must match the bundle filename, minus the extension. + Specifying the name explicitly is deprecated. @@ -352,6 +380,14 @@ + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + diff -Nru bcfg2-1.3.5/schemas/clients.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/clients.xsd --- bcfg2-1.3.5/schemas/clients.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/clients.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -60,7 +60,7 @@ - Profile group naem to associate this client with. + Profile group name to associate this client with. diff -Nru bcfg2-1.3.5/schemas/decisions.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/decisions.xsd --- bcfg2-1.3.5/schemas/decisions.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/decisions.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,5 +1,6 @@ - - + + decision list schema for bcfg2 @@ -7,16 +8,72 @@ - - - - - - - - - - - - + + + + + + A **DecisionsGroupType** is a tag used to provide logic. + Child entries of a DecisionsGroupType tag only apply to + machines that match the condition specified -- either + membership in a group, or a matching client name. + :xml:attribute:`DecisionsGroupType:negate` can be set to + negate the sense of the match. + + + + + + + + + + + + + The name of the client or group to match on. Child entries + will only apply to this client or group (unless + :xml:attribute:`DecisionsGroupType:negate` is set). + + + + + + + Negate the sense of the match, so that child entries only + apply to a client if it is not a member of the given group + or does not have the given name. + + + + + + + + + + + + + + + + + + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + + + + + diff -Nru bcfg2-1.3.5/schemas/defaults.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/defaults.xsd --- bcfg2-1.3.5/schemas/defaults.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/defaults.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -35,6 +35,14 @@ + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + diff -Nru bcfg2-1.3.5/schemas/fileprobes.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/fileprobes.xsd --- bcfg2-1.3.5/schemas/fileprobes.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/fileprobes.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ - + FileProbes plugin config schema for bcfg2 @@ -6,29 +7,44 @@ + + + + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. 
+ + + diff -Nru bcfg2-1.3.5/schemas/info.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/info.xsd --- bcfg2-1.3.5/schemas/info.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/info.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -92,6 +92,7 @@ + @@ -121,19 +122,28 @@ - + Top-level tag for ``info.xml``. - - - - - - - - - + + + + + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + + + + diff -Nru bcfg2-1.3.5/schemas/nagiosgen.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/nagiosgen.xsd --- bcfg2-1.3.5/schemas/nagiosgen.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/nagiosgen.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ - + NagiosGen config schema for bcfg2 @@ -6,26 +7,42 @@ + + + + + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + diff -Nru bcfg2-1.3.5/schemas/packages.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/packages.xsd --- bcfg2-1.3.5/schemas/packages.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/packages.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,5 @@ - + packages config schema for bcfg2 @@ -8,12 +9,15 @@ + + @@ -40,6 +44,7 @@ + @@ -58,6 +63,7 @@ + @@ -167,7 +173,7 @@ - + Include ``deb-src`` lines in the generated APT @@ -211,10 +217,20 @@ + + + + Specifiy an explicit name for the source and do not generate + it automatically. + + + + + @@ -222,16 +238,27 @@ + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + + diff -Nru bcfg2-1.3.5/schemas/pathentry.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/pathentry.xsd --- bcfg2-1.3.5/schemas/pathentry.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/pathentry.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -12,7 +12,34 @@ schemaLocation="genshi.xsd"/> - + + + Abstract description of a path to be installed. This can + either be a single explicit path (e.g., ``<Path + name="/etc/foo.conf"/>``) or a glob that matches a set of + paths (e.g., ``<Path glob="/etc/foo/*"/>``). Path + globbing may not work for some dynamically handled Path + entries, for instance :ref:`Packages client configs + <generating-client-configs>`. + + + + + + Install the single named path. Either ``name`` or + :xml:attribute:`PathEntry:glob` must be specified. + + + + + + + Install all Cfg entries matching the given glob. Either + ``glob`` or :xml:attribute:`PathEntry:name` must be + specified. + + + diff -Nru bcfg2-1.3.5/schemas/pkgtype.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/pkgtype.xsd --- bcfg2-1.3.5/schemas/pkgtype.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/pkgtype.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -31,10 +31,9 @@ Install the named package group. Package groups are only - supported for Yum :xml:element:`Source` repositories, and - only if the :ref:`yum libraries - <native-yum-libraries>` are in use. Either ``group`` - or :xml:attribute:`PackageStructure:name` must be specified. + supported for Pac and Yum :xml:element:`Source` + repositories. Either ``group`` or + :xml:attribute:`PackageStructure:name` must be specified. @@ -54,6 +53,15 @@ + + + + Whether also the recommended packages should be installed. + This is currently only used with the :ref:`APT + <client-tools-apt>` driver. 
+ + + @@ -146,38 +154,33 @@ - + - If this is set to any value other than "install", - package installation will be suppressed with the - :ref:`YUM24 and RPM <client-tools-yum>` drivers. + Whether or not to install missing packages. This is + only honored by the the :ref:`RPM + <client-tools-yum>` driver. - + - If this is set to any value other than "upgrade", a - package that has the incorrect version installed will - not be fixed with the :ref:`YUM24 and RPM - <client-tools-yum>` drivers. Note that - "upgrade" is misleading; if a package is installed - that is newer than the desired version, it will not be - downgraded if this attribute is set to anything other - than "upgrade". + Whether or not to upgrade or downgrade packages that + are installed, but have the wrong version. This is + only honored by the :ref:`RPM + <client-tools-yum>` driver. - + - If this is set to any value other than "reinstall", a - package that fails package verification will not be - reinstalled with the :ref:`YUM24 and RPM - <client-tools-yum>` drivers. + Whether or not to reinstall packages that fail + verification. This is only honored by the :ref:`RPM + <client-tools-yum>` driver. diff -Nru bcfg2-1.3.5/schemas/privkey.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/privkey.xsd --- bcfg2-1.3.5/schemas/privkey.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/privkey.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,16 +1,20 @@ - + Schema for :ref:`server-plugins-generators-cfg-sshkeys` ``privkey.xml`` + + - An **PrivateKeyGroupType** is a tag used to provide logic. + A **PrivateKeyGroupType** is a tag used to provide logic. Child entries of a PrivateKeyGroupType tag only apply to machines that match the condition specified -- either membership in a group, or a matching client name. @@ -19,6 +23,7 @@ + @@ -42,6 +47,7 @@ + @@ -72,6 +78,7 @@ + @@ -97,6 +104,7 @@ + @@ -107,6 +115,7 @@ + @@ -135,14 +144,15 @@ - + - Override the global strict/lax decryption setting in + Override the global lax_decryption setting in ``bcfg2.conf``. + diff -Nru bcfg2-1.3.5/schemas/pubkey.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/pubkey.xsd --- bcfg2-1.3.5/schemas/pubkey.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/pubkey.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,10 +1,14 @@ - + Schema for :ref:`server-plugins-generators-cfg-sshkeys` ``pubkey.xml`` + + diff -Nru bcfg2-1.3.5/schemas/rules.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/rules.xsd --- bcfg2-1.3.5/schemas/rules.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/rules.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -13,10 +13,6 @@ - - - - @@ -126,15 +122,6 @@ - - - - PostInstall entries are deprecated in favor of Action - entries. Actions can do everything PostInstall entries can - do and more. - - - @@ -211,6 +198,14 @@ + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. 
+ + + diff -Nru bcfg2-1.3.5/schemas/sslca-cert.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/sslca-cert.xsd --- bcfg2-1.3.5/schemas/sslca-cert.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/sslca-cert.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,10 +1,15 @@ - + - Schema for :ref:`server-plugins-generators-sslca` ``cert.xml`` + Schema for :ref:`server-plugins-generators-cfg-ssl-certificates` + ``sslcert.xml`` + + @@ -17,10 +22,12 @@ + + @@ -40,6 +47,7 @@ + @@ -69,7 +77,7 @@ The full path to the key entry to use for this certificate. This is the *client* path; e.g., for a key defined at - ``/var/lib/bcfg2/SSLCA/etc/pki/tls/private/foo.key/key.xml``, + ``/var/lib/bcfg2/SSLCA/etc/pki/tls/private/foo.key/sslkey.xml``, **key** should be ``/etc/pki/tls/private/foo.key``. @@ -85,8 +93,8 @@ The name of the CA (from :ref:`bcfg2.conf - <sslca-configuration>`) to use to generate this - certificate. + <server-plugins-generators-cfg-configuration>`) to use + to generate this certificate. @@ -147,21 +155,32 @@ + - + Top-level tag for describing an SSLCA generated certificate. - - - - - - - - - + + + + + + + + + + + + Override the global lax_decryption setting in + ``bcfg2.conf``. + + + + + + diff -Nru bcfg2-1.3.5/schemas/sslca-key.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/sslca-key.xsd --- bcfg2-1.3.5/schemas/sslca-key.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/sslca-key.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -1,10 +1,15 @@ - + - Schema for :ref:`server-plugins-generators-sslca` ``key.xml`` + Schema for :ref:`server-plugins-generators-cfg-ssl-certificates` + ``sslkey.xml`` + + @@ -17,9 +22,11 @@ + + @@ -39,6 +46,7 @@ + @@ -68,20 +76,46 @@ + - + Top-level tag for describing an SSLCA generated key. - - - - - - - - + + + + + + + + + + + Create keys on a per-host basis (rather than on a per-group + basis). + + + + + + + Create keys specific to the given category, instead of + specific to the category given in ``bcfg2.conf``. + + + + + + + Create group-specific keys with the given priority. + + + + + + diff -Nru bcfg2-1.3.5/schemas/types.xsd bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/types.xsd --- bcfg2-1.3.5/schemas/types.xsd 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/schemas/types.xsd 2017-01-10 19:18:17.000000000 +0000 @@ -105,13 +105,6 @@ - - - - - - - @@ -399,6 +392,14 @@ + + + + The name of the encryption passphrase that the text content + of this tag is encrypted with. + + + @@ -438,6 +439,16 @@ + + + + + + + + + + @@ -446,6 +457,8 @@ + + @@ -474,9 +487,9 @@ This field is typically used to record general information - about the account or its user(s) such as their real name - and phone number. If this is not set, the GECOS will be - the same as the username. + about the account or its user(s) such as their real name + and phone number. If this is not set, the GECOS will be + the same as the username. 
diff -Nru bcfg2-1.3.5/setup.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/setup.py --- bcfg2-1.3.5/setup.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/setup.py 2017-01-10 19:18:17.000000000 +0000 @@ -30,6 +30,7 @@ # nosetests test_suite='nose.collector', packages=["Bcfg2", + "Bcfg2.Options", "Bcfg2.Client", "Bcfg2.Client.Tools", "Bcfg2.Client.Tools.POSIX", @@ -39,10 +40,7 @@ "Bcfg2.Reporting.migrations", "Bcfg2.Reporting.templatetags", 'Bcfg2.Server', - "Bcfg2.Server.Admin", "Bcfg2.Server.FileMonitor", - "Bcfg2.Server.Hostbase", - "Bcfg2.Server.Hostbase.hostbase", "Bcfg2.Server.Lint", "Bcfg2.Server.Plugin", "Bcfg2.Server.Plugins", @@ -50,10 +48,9 @@ "Bcfg2.Server.Plugins.Cfg", "Bcfg2.Server.Reports", "Bcfg2.Server.Reports.reports", - "Bcfg2.Server.Snapshots", ], install_requires=inst_reqs, - tests_require=['mock', 'nose', 'sqlalchemy'], + tests_require=['mock', 'nose'], package_dir={'': 'src/lib', }, package_data={'Bcfg2.Reporting': ['templates/*.html', 'templates/*/*.html', @@ -69,12 +66,6 @@ ('share/man/man1', glob("man/bcfg2.1")), ('share/man/man5', glob("man/*.5")), ('share/man/man8', glob("man/*.8")), - ('share/bcfg2/Hostbase/templates', - glob('src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/*.*')), - ('share/bcfg2/Hostbase/templates/hostbase', - glob('src/lib/Bcfg2/Server/Hostbase/hostbase/webtemplates/hostbase/*')), - ('share/bcfg2/Hostbase/repo', - glob('src/lib/Bcfg2/Server/Hostbase/templates/*')), ('share/bcfg2/site_media', glob('reports/site_media/*')), ] diff -Nru bcfg2-1.3.5/solaris/Makefile bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/Makefile --- bcfg2-1.3.5/solaris/Makefile 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/Makefile 2017-01-10 19:18:17.000000000 +0000 @@ -1,8 +1,8 @@ #!/usr/sfw/bin/gmake PYTHON="/usr/local/bin/python" -VERS=1.3.5-1 -export PYVERSION := $(shell $(PYTHON) -c "import sys; print sys.version[0:3]") +VERS=1.4.0pre2-1 +PYVERSION := $(shell $(PYTHON) -c "import sys; print sys.version[0:3]") default: clean package diff -Nru bcfg2-1.3.5/solaris/pkginfo.bcfg2 bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/pkginfo.bcfg2 --- bcfg2-1.3.5/solaris/pkginfo.bcfg2 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/pkginfo.bcfg2 2017-01-10 19:18:17.000000000 +0000 @@ -1,7 +1,7 @@ PKG="SCbcfg2" NAME="bcfg2" ARCH="sparc" -VERSION="1.3.5" +VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" diff -Nru bcfg2-1.3.5/solaris/pkginfo.bcfg2-server bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/pkginfo.bcfg2-server --- bcfg2-1.3.5/solaris/pkginfo.bcfg2-server 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/pkginfo.bcfg2-server 2017-01-10 19:18:17.000000000 +0000 @@ -1,7 +1,7 @@ PKG="SCbcfg2-server" NAME="bcfg2-server" ARCH="sparc" -VERSION="1.3.5" +VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" diff -Nru bcfg2-1.3.5/solaris/prototype.bcfg2 bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/prototype.bcfg2 --- bcfg2-1.3.5/solaris/prototype.bcfg2 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/prototype.bcfg2 2017-01-10 19:18:17.000000000 +0000 @@ -4,8 +4,6 @@ d none lib/PYVERSION/site-packages 0755 root bin d none lib/PYVERSION/site-packages/Bcfg2 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/__init__.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Statistics.py 0644 
bin bin -f none lib/PYVERSION/site-packages/Bcfg2/SSLServer.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Component.py 0644 bin bin d none lib/PYVERSION/site-packages/Bcfg2/Client 0755 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/XML.py 0644 bin bin @@ -13,7 +11,7 @@ f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Action.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/IPS.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/FreeBSDInit.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/RPMng.py 0644 bin bin +f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/RPM.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Chkconfig.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/RcUpdate.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/APT.py 0644 bin bin @@ -24,15 +22,15 @@ f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/launchd.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/FreeBSDPackage.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Blast.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/YUMng.py 0644 bin bin +f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/YUM.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Portage.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/DebInit.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/Encap.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Tools/SMF.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Client/Frame.py 0644 bin bin +f none lib/PYVERSION/site-packages/Bcfg2/Client/Proxy.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Logger.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Proxy.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Options.py 0644 bin bin d none bin 0755 root bin f none bin/bcfg2 0755 bin bin diff -Nru bcfg2-1.3.5/solaris/prototype.bcfg2-server bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/prototype.bcfg2-server --- bcfg2-1.3.5/solaris/prototype.bcfg2-server 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris/prototype.bcfg2-server 2017-01-10 19:18:17.000000000 +0000 @@ -8,7 +8,6 @@ f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Tidy.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Minestruct.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/__init__.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Snapshots.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Init.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Group.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Admin/Compare.py 0644 bin bin @@ -28,35 +27,25 @@ f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/SSHbase.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Trigger.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/__init__.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Snapshots.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/TCheetah.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Account.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Cfg.py 0644 bin bin -f none 
lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Statistics.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Metadata.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Base.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Pkgmgr.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Ohai.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Properties.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Editor.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Bundler.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/NagiosGen.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Deps.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Svn.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/DBStats.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/TGenshi.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Plugins/Git.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/FileMonitor.py 0644 bin bin -d none lib/PYVERSION/site-packages/Bcfg2/Server/Snapshots 0755 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Snapshots/model.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Server/Snapshots/__init__.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Server/Core.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/__init__.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Statistics.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/SSLServer.py 0644 bin bin +f none lib/PYVERSION/site-packages/Bcfg2/Server/Statistics.py 0644 bin bin +f none lib/PYVERSION/site-packages/Bcfg2/Server/SSLServer.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Component.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Logger.py 0644 bin bin -f none lib/PYVERSION/site-packages/Bcfg2/Proxy.py 0644 bin bin f none lib/PYVERSION/site-packages/Bcfg2/Options.py 0644 bin bin d none bin 0755 bin bin f none bin/bcfg2-server 0755 bin bin diff -Nru bcfg2-1.3.5/solaris-ips/Makefile bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/Makefile --- bcfg2-1.3.5/solaris-ips/Makefile 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/Makefile 2017-01-10 19:18:17.000000000 +0000 @@ -1,6 +1,6 @@ #!/usr/bin/gmake -VERS=1.3.5-1 +VERS=1.4.0pre2-1 PYVERSION := $(shell python -c "import sys; print sys.version[0:3]") default: clean package diff -Nru bcfg2-1.3.5/solaris-ips/MANIFEST.bcfg2.header bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/MANIFEST.bcfg2.header --- bcfg2-1.3.5/solaris-ips/MANIFEST.bcfg2.header 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/MANIFEST.bcfg2.header 2017-01-10 19:18:17.000000000 +0000 @@ -1,5 +1,5 @@ license ../../LICENSE license=simplified_bsd set name=description value="Configuration management client" set name=pkg.summary value="Configuration management client" -set name=pkg.fmri value="pkg://bcfg2/bcfg2@1.3.5" +set name=pkg.fmri value="pkg://bcfg2/bcfg2@1.4.0pre2" file usr/bin/bcfg2 group=bin mode=0755 owner=root path=usr/bin/bcfg2 diff -Nru bcfg2-1.3.5/solaris-ips/MANIFEST.bcfg2-server.header bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/MANIFEST.bcfg2-server.header --- bcfg2-1.3.5/solaris-ips/MANIFEST.bcfg2-server.header 2014-09-05 12:54:48.000000000 +0000 +++ 
bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/MANIFEST.bcfg2-server.header 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,4 @@ license ../../LICENSE license=simplified_bsd set name=description value="Configuration management server" set name=pkg.summary value="Configuration management server" -set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@1.3.5" +set name=pkg.fmri value="pkg://bcfg2/bcfg2-server@1.4.0pre2" diff -Nru bcfg2-1.3.5/solaris-ips/pkginfo.bcfg2 bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/pkginfo.bcfg2 --- bcfg2-1.3.5/solaris-ips/pkginfo.bcfg2 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/pkginfo.bcfg2 2017-01-10 19:18:17.000000000 +0000 @@ -1,7 +1,7 @@ PKG="SCbcfg2" NAME="bcfg2" ARCH="sparc" -VERSION="1.3.5" +VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" diff -Nru bcfg2-1.3.5/solaris-ips/pkginfo.bcfg2-server bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/pkginfo.bcfg2-server --- bcfg2-1.3.5/solaris-ips/pkginfo.bcfg2-server 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/solaris-ips/pkginfo.bcfg2-server 2017-01-10 19:18:17.000000000 +0000 @@ -1,7 +1,7 @@ PKG="SCbcfg2-server" NAME="bcfg2-server" ARCH="sparc" -VERSION="1.3.5" +VERSION="1.4.0pre2" CATEGORY="application" VENDOR="Argonne National Labratory" EMAIL="bcfg-dev@mcs.anl.gov" diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Cache.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Cache.py --- bcfg2-1.3.5/src/lib/Bcfg2/Cache.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Cache.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -""" An implementation of a simple memory-backed cache. Right now this -doesn't provide many features, but more (time-based expiration, etc.) -can be added as necessary. 
""" - - -class Cache(dict): - """ an implementation of a simple memory-backed cache """ - - def expire(self, key=None): - """ expire all items, or a specific item, from the cache """ - if key is None: - self.clear() - elif key in self: - del self[key] diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Client.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Client.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Client.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,340 +0,0 @@ -""" The main Bcfg2 client class """ - -import os -import sys -import stat -import time -import fcntl -import socket -import logging -import tempfile -import Bcfg2.Proxy -import Bcfg2.Logger -import Bcfg2.Options -import Bcfg2.Client.XML -import Bcfg2.Client.Frame -import Bcfg2.Client.Tools -from Bcfg2.Utils import locked, Executor -from Bcfg2.Compat import xmlrpclib -from Bcfg2.version import __version__ - - -class Client(object): - """ The main Bcfg2 client class """ - - def __init__(self, setup): - self.toolset = None - self.tools = None - self.config = None - self._proxy = None - self.setup = setup - - if self.setup['debug']: - level = logging.DEBUG - elif self.setup['verbose']: - level = logging.INFO - else: - level = logging.WARNING - Bcfg2.Logger.setup_logging('bcfg2', - to_syslog=self.setup['syslog'], - level=level, - to_file=self.setup['logging']) - self.logger = logging.getLogger('bcfg2') - self.logger.debug(self.setup) - - self.cmd = Executor(self.setup['command_timeout']) - - if self.setup['bundle_quick']: - if not self.setup['bundle'] and not self.setup['skipbundle']: - self.logger.error("-Q option requires -b or -B") - raise SystemExit(1) - elif self.setup['remove']: - self.logger.error("-Q option incompatible with -r") - raise SystemExit(1) - if 'drivers' in self.setup and self.setup['drivers'] == 'help': - self.logger.info("The following drivers are available:") - self.logger.info(Bcfg2.Client.Tools.drivers) - raise SystemExit(0) - if self.setup['remove'] and 'services' in self.setup['remove'].lower(): - self.logger.error("Service removal is nonsensical; " - "removed services will only be disabled") - if (self.setup['remove'] and - self.setup['remove'].lower() not in ['all', 'services', 'packages', - 'users']): - self.logger.error("Got unknown argument %s for -r" % - self.setup['remove']) - if self.setup["file"] and self.setup["cache"]: - print("cannot use -f and -c together") - raise SystemExit(1) - if not self.setup['server'].startswith('https://'): - self.setup['server'] = 'https://' + self.setup['server'] - - def _probe_failure(self, probename, msg): - """ handle failure of a probe in the way the user wants us to - (exit or continue) """ - message = "Failed to execute probe %s: %s" % (probename, msg) - if self.setup['probe_exit']: - self.fatal_error(message) - else: - self.logger.error(message) - - def run_probe(self, probe): - """Execute probe.""" - name = probe.get('name') - self.logger.info("Running probe %s" % name) - ret = Bcfg2.Client.XML.Element("probe-data", - name=name, - source=probe.get('source')) - try: - scripthandle, scriptname = tempfile.mkstemp() - script = os.fdopen(scripthandle, 'w') - try: - script.write("#!%s\n" % - (probe.attrib.get('interpreter', '/bin/sh'))) - if sys.hexversion >= 0x03000000: - script.write(probe.text) - else: - script.write(probe.text.encode('utf-8')) - script.close() - os.chmod(scriptname, - stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | - stat.S_IXUSR | 
stat.S_IXGRP | stat.S_IXOTH | - stat.S_IWUSR) # 0755 - rv = self.cmd.run(scriptname, timeout=self.setup['timeout']) - if rv.stderr: - self.logger.warning("Probe %s has error output: %s" % - (name, rv.stderr)) - if not rv.success: - self._probe_failure(name, "Return value %s" % rv) - self.logger.info("Probe %s has result:" % name) - self.logger.info(rv.stdout) - if sys.hexversion >= 0x03000000: - ret.text = rv.stdout - else: - ret.text = rv.stdout.decode('utf-8') - finally: - os.unlink(scriptname) - except SystemExit: - raise - except: - self._probe_failure(name, sys.exc_info()[1]) - return ret - - def fatal_error(self, message): - """Signal a fatal error.""" - self.logger.error("Fatal error: %s" % (message)) - raise SystemExit(1) - - @property - def proxy(self): - """ get an XML-RPC proxy to the server """ - if self._proxy is None: - self._proxy = Bcfg2.Proxy.ComponentProxy( - self.setup['server'], - self.setup['user'], - self.setup['password'], - key=self.setup['key'], - cert=self.setup['certificate'], - ca=self.setup['ca'], - allowedServerCNs=self.setup['serverCN'], - timeout=self.setup['timeout'], - retries=int(self.setup['retries']), - delay=int(self.setup['retry_delay'])) - return self._proxy - - def run_probes(self, times=None): - """ run probes and upload probe data """ - if times is None: - times = dict() - - try: - probes = Bcfg2.Client.XML.XML(str(self.proxy.GetProbes())) - except (Bcfg2.Proxy.ProxyError, - Bcfg2.Proxy.CertificateError, - socket.gaierror, - socket.error): - err = sys.exc_info()[1] - self.fatal_error("Failed to download probes from bcfg2: %s" % err) - except Bcfg2.Client.XML.ParseError: - err = sys.exc_info()[1] - self.fatal_error("Server returned invalid probe requests: %s" % - err) - - times['probe_download'] = time.time() - - # execute probes - probedata = Bcfg2.Client.XML.Element("ProbeData") - for probe in probes.findall(".//probe"): - probedata.append(self.run_probe(probe)) - - if len(probes.findall(".//probe")) > 0: - try: - # upload probe responses - self.proxy.RecvProbeData( - Bcfg2.Client.XML.tostring( - probedata, - xml_declaration=False).decode('utf-8')) - except Bcfg2.Proxy.ProxyError: - err = sys.exc_info()[1] - self.fatal_error("Failed to upload probe data: %s" % err) - - times['probe_upload'] = time.time() - - def get_config(self, times=None): - """ load the configuration, either from the cached - configuration file (-f), or from the server """ - if times is None: - times = dict() - - if self.setup['file']: - # read config from file - try: - self.logger.debug("Reading cached configuration from %s" % - self.setup['file']) - return open(self.setup['file'], 'r').read() - except IOError: - self.fatal_error("Failed to read cached configuration from: %s" - % (self.setup['file'])) - else: - # retrieve config from server - if self.setup['profile']: - try: - self.proxy.AssertProfile(self.setup['profile']) - except Bcfg2.Proxy.ProxyError: - err = sys.exc_info()[1] - self.fatal_error("Failed to set client profile: %s" % err) - - try: - self.proxy.DeclareVersion(__version__) - except xmlrpclib.Fault: - err = sys.exc_info()[1] - if (err.faultCode == xmlrpclib.METHOD_NOT_FOUND or - (err.faultCode == 7 and - err.faultString.startswith("Unknown method"))): - self.logger.debug("Server does not support declaring " - "client version") - else: - self.logger.error("Failed to declare version: %s" % err) - except (Bcfg2.Proxy.ProxyError, - Bcfg2.Proxy.CertificateError, - socket.gaierror, - socket.error): - err = sys.exc_info()[1] - self.logger.error("Failed to declare 
version: %s" % err) - - self.run_probes(times=times) - - if self.setup['decision'] in ['whitelist', 'blacklist']: - try: - self.setup['decision_list'] = \ - self.proxy.GetDecisionList(self.setup['decision']) - self.logger.info("Got decision list from server:") - self.logger.info(self.setup['decision_list']) - except Bcfg2.Proxy.ProxyError: - err = sys.exc_info()[1] - self.fatal_error("Failed to get decision list: %s" % err) - - try: - rawconfig = self.proxy.GetConfig().encode('utf-8') - except Bcfg2.Proxy.ProxyError: - err = sys.exc_info()[1] - self.fatal_error("Failed to download configuration from " - "Bcfg2: %s" % err) - - times['config_download'] = time.time() - return rawconfig - - def run(self): - """Perform client execution phase.""" - times = {} - - # begin configuration - times['start'] = time.time() - - self.logger.info("Starting Bcfg2 client run at %s" % times['start']) - - rawconfig = self.get_config(times=times).decode('utf-8') - - if self.setup['cache']: - try: - open(self.setup['cache'], 'w').write(rawconfig) - os.chmod(self.setup['cache'], 33152) - except IOError: - self.logger.warning("Failed to write config cache file %s" % - (self.setup['cache'])) - times['caching'] = time.time() - - try: - self.config = Bcfg2.Client.XML.XML(rawconfig) - except Bcfg2.Client.XML.ParseError: - syntax_error = sys.exc_info()[1] - self.fatal_error("The configuration could not be parsed: %s" % - syntax_error) - - times['config_parse'] = time.time() - - if self.config.tag == 'error': - self.fatal_error("Server error: %s" % (self.config.text)) - return(1) - - if self.setup['bundle_quick']: - newconfig = Bcfg2.Client.XML.XML('') - for bundle in self.config.getchildren(): - if (bundle.tag == 'Bundle' and - ((self.setup['bundle'] and - bundle.get('name') in self.setup['bundle']) or - (self.setup['skipbundle'] and - bundle.get('name') not in self.setup['skipbundle']))): - newconfig.append(bundle) - self.config = newconfig - - self.tools = Bcfg2.Client.Frame.Frame(self.config, - self.setup, - times, self.setup['drivers'], - self.setup['dryrun']) - - if not self.setup['omit_lock_check']: - # check lock here - try: - lockfile = open(self.setup['lockfile'], 'w') - if locked(lockfile.fileno()): - self.fatal_error("Another instance of Bcfg2 is running. 
" - "If you want to bypass the check, run " - "with the %s option" % - Bcfg2.Options.OMIT_LOCK_CHECK.cmd) - except SystemExit: - raise - except: - lockfile = None - self.logger.error("Failed to open lockfile %s: %s" % - (self.setup['lockfile'], sys.exc_info()[1])) - - # execute the configuration - self.tools.Execute() - - if not self.setup['omit_lock_check']: - # unlock here - if lockfile: - try: - fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN) - os.remove(self.setup['lockfile']) - except OSError: - self.logger.error("Failed to unlock lockfile %s" % - lockfile.name) - - if not self.setup['file'] and not self.setup['bundle_quick']: - # upload statistics - feedback = self.tools.GenerateStats() - - try: - self.proxy.RecvStats( - Bcfg2.Client.XML.tostring( - feedback, - xml_declaration=False).decode('utf-8')) - except Bcfg2.Proxy.ProxyError: - err = sys.exc_info()[1] - self.logger.error("Failed to upload configuration statistics: " - "%s" % err) - raise SystemExit(2) - - self.logger.info("Finished Bcfg2 client run at %s" % time.time()) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Frame.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Frame.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Frame.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Frame.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,536 +0,0 @@ -""" Frame is the Client Framework that verifies and installs entries, -and generates statistics. """ - -import copy -import time -import fnmatch -import logging -import Bcfg2.Client.Tools -from Bcfg2.Client import prompt -from Bcfg2.Compat import any, all # pylint: disable=W0622 - - -def matches_entry(entryspec, entry): - """ Determine if the Decisions-style entry specification matches - the entry. Both are tuples of (tag, name). The entryspec can - handle the wildcard * in either position. """ - if entryspec == entry: - return True - return all(fnmatch.fnmatch(entry[i], entryspec[i]) for i in [0, 1]) - - -def matches_white_list(entry, whitelist): - """ Return True if (, ) is in the given - whitelist. """ - return any(matches_entry(we, (entry.tag, entry.get('name'))) - for we in whitelist) - - -def passes_black_list(entry, blacklist): - """ Return True if (, ) is not in the given - blacklist. """ - return not any(matches_entry(be, (entry.tag, entry.get('name'))) - for be in blacklist) - - -# pylint: disable=W0702 -# in frame we frequently want to catch all exceptions, regardless of -# type, so disable the pylint rule that catches that. 
- - -class Frame(object): - """Frame is the container for all Tool objects and state information.""" - - def __init__(self, config, setup, times, drivers, dryrun): - self.config = config - self.times = times - self.dryrun = dryrun - self.times['initialization'] = time.time() - self.setup = setup - self.tools = [] - self.states = {} - self.whitelist = [] - self.blacklist = [] - self.removal = [] - self.logger = logging.getLogger(__name__) - for driver in drivers[:]: - if (driver not in Bcfg2.Client.Tools.drivers and - isinstance(driver, str)): - self.logger.error("Tool driver %s is not available" % driver) - drivers.remove(driver) - - tclass = {} - for tool in drivers: - if not isinstance(tool, str): - tclass[time.time()] = tool - tool_class = "Bcfg2.Client.Tools.%s" % tool - try: - tclass[tool] = getattr(__import__(tool_class, globals(), - locals(), ['*']), - tool) - except ImportError: - continue - except: - self.logger.error("Tool %s unexpectedly failed to load" % tool, - exc_info=1) - - for tool in list(tclass.values()): - try: - self.tools.append(tool(self.logger, setup, config)) - except Bcfg2.Client.Tools.ToolInstantiationError: - continue - except: - self.logger.error("Failed to instantiate tool %s" % tool, - exc_info=1) - - for tool in self.tools[:]: - for conflict in getattr(tool, 'conflicts', []): - for item in self.tools: - if item.name == conflict: - self.tools.remove(item) - - self.logger.info("Loaded tool drivers:") - self.logger.info([tool.name for tool in self.tools]) - - deprecated = [tool.name for tool in self.tools if tool.deprecated] - if deprecated: - self.logger.warning("Loaded deprecated tool drivers:") - self.logger.warning(deprecated) - experimental = [tool.name for tool in self.tools if tool.experimental] - if experimental: - self.logger.info("Loaded experimental tool drivers:") - self.logger.info(experimental) - - # find entries not handled by any tools - self.unhandled = [entry for struct in config - for entry in struct - if entry not in self.handled] - - if self.unhandled: - self.logger.error("The following entries are not handled by any " - "tool:") - for entry in self.unhandled: - self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'), - entry.get('name'))) - - self.find_dups(config) - - pkgs = [(entry.get('name'), entry.get('origin')) - for struct in config - for entry in struct - if entry.tag == 'Package'] - if pkgs: - self.logger.debug("The following packages are specified in bcfg2:") - self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None]) - self.logger.debug("The following packages are prereqs added by " - "Packages:") - self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages']) - - def find_dups(self, config): - """ Find duplicate entries and warn about them """ - entries = dict() - for struct in config: - for entry in struct: - for tool in self.tools: - if tool.handlesEntry(entry): - pkey = tool.primarykey(entry) - if pkey in entries: - entries[pkey] += 1 - else: - entries[pkey] = 1 - multi = [e for e, c in entries.items() if c > 1] - if multi: - self.logger.debug("The following entries are included multiple " - "times:") - for entry in multi: - self.logger.debug(entry) - - def promptFilter(self, msg, entries): - """Filter a supplied list based on user input.""" - ret = [] - entries.sort(key=lambda e: e.tag + ":" + e.get('name')) - for entry in entries[:]: - if entry in self.unhandled: - # don't prompt for entries that can't be installed - continue - if 'qtext' in entry.attrib: - iprompt = entry.get('qtext') - else: - iprompt = 
msg % (entry.tag, entry.get('name')) - if prompt(iprompt): - ret.append(entry) - return ret - - def __getattr__(self, name): - if name in ['extra', 'handled', 'modified', '__important__']: - ret = [] - for tool in self.tools: - ret += getattr(tool, name) - return ret - elif name in self.__dict__: - return self.__dict__[name] - raise AttributeError(name) - - def InstallImportant(self): - """Install important entries - - We also process the decision mode stuff here because we want to prevent - non-whitelisted/blacklisted 'important' entries from being installed - prior to determining the decision mode on the client. - """ - # Need to process decision stuff early so that dryrun mode - # works with it - self.whitelist = [entry for entry in self.states - if not self.states[entry]] - if not self.setup['file']: - if self.setup['decision'] == 'whitelist': - dwl = self.setup['decision_list'] - w_to_rem = [e for e in self.whitelist - if not matches_white_list(e, dwl)] - if w_to_rem: - self.logger.info("In whitelist mode: " - "suppressing installation of:") - self.logger.info(["%s:%s" % (e.tag, e.get('name')) - for e in w_to_rem]) - self.whitelist = [x for x in self.whitelist - if x not in w_to_rem] - elif self.setup['decision'] == 'blacklist': - b_to_rem = \ - [e for e in self.whitelist - if not passes_black_list(e, self.setup['decision_list'])] - if b_to_rem: - self.logger.info("In blacklist mode: " - "suppressing installation of:") - self.logger.info(["%s:%s" % (e.tag, e.get('name')) - for e in b_to_rem]) - self.whitelist = [x for x in self.whitelist - if x not in b_to_rem] - - # take care of important entries first - if not self.dryrun: - parent_map = dict((c, p) - for p in self.config.getiterator() - for c in p) - for cfile in self.config.findall(".//Path"): - if (cfile.get('name') not in self.__important__ or - cfile.get('type') != 'file' or - cfile not in self.whitelist): - continue - parent = parent_map[cfile] - if ((parent.tag == "Bundle" and - ((self.setup['bundle'] and - parent.get("name") not in self.setup['bundle']) or - (self.setup['skipbundle'] and - parent.get("name") in self.setup['skipbundle']))) or - (parent.tag == "Independent" and - (self.setup['bundle'] or self.setup['skipindep']))): - continue - tools = [t for t in self.tools - if t.handlesEntry(cfile) and t.canVerify(cfile)] - if tools: - if (self.setup['interactive'] and not - self.promptFilter("Install %s: %s? (y/N):", [cfile])): - self.whitelist.remove(cfile) - continue - try: - self.states[cfile] = tools[0].InstallPath(cfile) - if self.states[cfile]: - tools[0].modified.append(cfile) - except: - self.logger.error("Unexpected tool failure", - exc_info=1) - cfile.set('qtext', '') - if tools[0].VerifyPath(cfile, []): - self.whitelist.remove(cfile) - - def Inventory(self): - """ - Verify all entries, - find extra entries, - and build up workqueues - - """ - # initialize all states - for struct in self.config.getchildren(): - for entry in struct.getchildren(): - self.states[entry] = False - for tool in self.tools: - try: - tool.Inventory(self.states) - except: - self.logger.error("%s.Inventory() call failed:" % tool.name, - exc_info=1) - - def Decide(self): # pylint: disable=R0912 - """Set self.whitelist based on user interaction.""" - iprompt = "Install %s: %s? (y/N): " - rprompt = "Remove %s: %s? 
(y/N): " - if self.setup['remove']: - if self.setup['remove'] == 'all': - self.removal = self.extra - elif self.setup['remove'].lower() == 'services': - self.removal = [entry for entry in self.extra - if entry.tag == 'Service'] - elif self.setup['remove'].lower() == 'packages': - self.removal = [entry for entry in self.extra - if entry.tag == 'Package'] - elif self.setup['remove'].lower() == 'users': - self.removal = [entry for entry in self.extra - if entry.tag in ['POSIXUser', 'POSIXGroup']] - - candidates = [entry for entry in self.states - if not self.states[entry]] - - if self.dryrun: - if self.whitelist: - self.logger.info("In dryrun mode: " - "suppressing entry installation for:") - self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) - for entry in self.whitelist]) - self.whitelist = [] - if self.removal: - self.logger.info("In dryrun mode: " - "suppressing entry removal for:") - self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) - for entry in self.removal]) - self.removal = [] - - # Here is where most of the work goes - # first perform bundle filtering - all_bundle_names = [b.get('name') - for b in self.config.findall('./Bundle')] - bundles = self.config.getchildren() - if self.setup['bundle']: - # warn if non-existent bundle given - for bundle in self.setup['bundle']: - if bundle not in all_bundle_names: - self.logger.info("Warning: Bundle %s not found" % bundle) - bundles = [b for b in bundles - if b.get('name') in self.setup['bundle']] - elif self.setup['indep']: - bundles = [b for b in bundles if b.tag != 'Bundle'] - if self.setup['skipbundle']: - # warn if non-existent bundle given - if not self.setup['bundle_quick']: - for bundle in self.setup['skipbundle']: - if bundle not in all_bundle_names: - self.logger.info("Warning: Bundle %s not found" % - bundle) - bundles = [b for b in bundles - if b.get('name') not in self.setup['skipbundle']] - if self.setup['skipindep']: - bundles = [b for b in bundles if b.tag == 'Bundle'] - - self.whitelist = [e for e in self.whitelist - if any(e in b for b in bundles)] - - # first process prereq actions - for bundle in bundles[:]: - if bundle.tag != 'Bundle': - continue - bmodified = len([item for item in bundle - if item in self.whitelist or - item in self.modified]) - actions = [a for a in bundle.findall('./Action') - if (a.get('timing') != 'post' and - (bmodified or a.get('when') == 'always'))] - # now we process all "pre" and "both" actions that are either - # always or the bundle has been modified - if self.setup['interactive']: - self.promptFilter(iprompt, actions) - self.DispatchInstallCalls(actions) - - # need to test to fail entries in whitelist - if False in [self.states[a] for a in actions]: - # then display bundles forced off with entries - self.logger.info("Bundle %s failed prerequisite action" % - (bundle.get('name'))) - bundles.remove(bundle) - b_to_remv = [ent for ent in self.whitelist if ent in bundle] - if b_to_remv: - self.logger.info("Not installing entries from Bundle %s" % - (bundle.get('name'))) - self.logger.info(["%s:%s" % (e.tag, e.get('name')) - for e in b_to_remv]) - for ent in b_to_remv: - self.whitelist.remove(ent) - - self.logger.debug("Installing entries in the following bundle(s):") - self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles - if b.get("name"))) - - if self.setup['interactive']: - self.whitelist = self.promptFilter(iprompt, self.whitelist) - self.removal = self.promptFilter(rprompt, self.removal) - - for entry in candidates: - if entry not in self.whitelist: - 
self.blacklist.append(entry) - - def DispatchInstallCalls(self, entries): - """Dispatch install calls to underlying tools.""" - for tool in self.tools: - handled = [entry for entry in entries if tool.canInstall(entry)] - if not handled: - continue - try: - tool.Install(handled, self.states) - except: - self.logger.error("%s.Install() call failed:" % tool.name, - exc_info=1) - - def Install(self): - """Install all entries.""" - self.DispatchInstallCalls(self.whitelist) - mods = self.modified - mbundles = [struct for struct in self.config.findall('Bundle') - if any(True for mod in mods if mod in struct)] - - if self.modified: - # Handle Bundle interdeps - if mbundles: - self.logger.info("The Following Bundles have been modified:") - self.logger.info([mbun.get('name') for mbun in mbundles]) - tbm = [(t, b) for t in self.tools for b in mbundles] - for tool, bundle in tbm: - try: - tool.Inventory(self.states, [bundle]) - except: - self.logger.error("%s.Inventory() call failed:" % - tool.name, - exc_info=1) - clobbered = [entry for bundle in mbundles for entry in bundle - if (not self.states[entry] and - entry not in self.blacklist)] - if clobbered: - self.logger.debug("Found clobbered entries:") - self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) - for entry in clobbered]) - if not self.setup['interactive']: - self.DispatchInstallCalls(clobbered) - - for bundle in self.config.findall('.//Bundle'): - if (self.setup['bundle'] and - bundle.get('name') not in self.setup['bundle']): - # prune out unspecified bundles when running with -b - continue - if bundle in mbundles: - self.logger.debug("Bundle %s was modified" % - bundle.get('name')) - func = "BundleUpdated" - else: - self.logger.debug("Bundle %s was not modified" % - bundle.get('name')) - func = "BundleNotUpdated" - for tool in self.tools: - try: - getattr(tool, func)(bundle, self.states) - except: - self.logger.error("%s.%s() call failed:" % - (tool.name, func), exc_info=1) - - def Remove(self): - """Remove extra entries.""" - for tool in self.tools: - extras = [entry for entry in self.removal - if tool.handlesEntry(entry)] - if extras: - try: - tool.Remove(extras) - except: - self.logger.error("%s.Remove() failed" % tool.name, - exc_info=1) - - def CondDisplayState(self, phase): - """Conditionally print tracing information.""" - self.logger.info('Phase: %s' % phase) - self.logger.info('Correct entries: %d' % - list(self.states.values()).count(True)) - self.logger.info('Incorrect entries: %d' % - list(self.states.values()).count(False)) - if phase == 'final' and list(self.states.values()).count(False): - for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" + - e.get('name')): - if not self.states[entry]: - etype = entry.get('type') - if etype: - self.logger.info("%s:%s:%s" % (entry.tag, etype, - entry.get('name'))) - else: - self.logger.info("%s:%s" % (entry.tag, - entry.get('name'))) - self.logger.info('Total managed entries: %d' % - len(list(self.states.values()))) - self.logger.info('Unmanaged entries: %d' % len(self.extra)) - if phase == 'final' and self.setup['extra']: - for entry in sorted(self.extra, key=lambda e: e.tag + ":" + - e.get('name')): - etype = entry.get('type') - if etype: - self.logger.info("%s:%s:%s" % (entry.tag, etype, - entry.get('name'))) - else: - self.logger.info("%s:%s" % (entry.tag, - entry.get('name'))) - - if ((list(self.states.values()).count(False) == 0) and not self.extra): - self.logger.info('All entries correct.') - - def ReInventory(self): - """Recheck everything.""" - if not 
self.dryrun and self.setup['kevlar']: - self.logger.info("Rechecking system inventory") - self.Inventory() - - def Execute(self): - """Run all methods.""" - self.Inventory() - self.times['inventory'] = time.time() - self.CondDisplayState('initial') - self.InstallImportant() - self.Decide() - self.Install() - self.times['install'] = time.time() - self.Remove() - self.times['remove'] = time.time() - if self.modified: - self.ReInventory() - self.times['reinventory'] = time.time() - self.times['finished'] = time.time() - self.CondDisplayState('final') - - def GenerateStats(self): - """Generate XML summary of execution statistics.""" - feedback = Bcfg2.Client.XML.Element("upload-statistics") - stats = Bcfg2.Client.XML.SubElement( - feedback, - 'Statistics', - total=str(len(self.states)), - version='2.0', - revision=self.config.get('revision', '-1')) - good_entries = [key for key, val in list(self.states.items()) if val] - good = len(good_entries) - stats.set('good', str(good)) - if any(not val for val in list(self.states.values())): - stats.set('state', 'dirty') - else: - stats.set('state', 'clean') - - # List bad elements of the configuration - for (data, ename) in [(self.modified, 'Modified'), - (self.extra, "Extra"), - (good_entries, "Good"), - ([entry for entry in self.states - if not self.states[entry]], "Bad")]: - container = Bcfg2.Client.XML.SubElement(stats, ename) - for item in data: - item.set('qtext', '') - container.append(copy.deepcopy(item)) - item.text = None - - timeinfo = Bcfg2.Client.XML.Element("OpStamps") - feedback.append(stats) - for (event, timestamp) in list(self.times.items()): - timeinfo.set(event, str(timestamp)) - stats.append(timeinfo) - return feedback diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/__init__.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/__init__.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/__init__.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/__init__.py 2017-01-10 19:18:17.000000000 +0000 @@ -2,8 +2,56 @@ import os import sys -import select -from Bcfg2.Compat import input # pylint: disable=W0622 +import stat +import time +import fcntl +import socket +import fnmatch +import logging +import argparse +import tempfile +import copy +import Bcfg2.Logger +import Bcfg2.Options +from Bcfg2.Client import XML +from Bcfg2.Client import Proxy +from Bcfg2.Client import Tools +from Bcfg2.Utils import locked, Executor, safe_input +from Bcfg2.version import __version__ +# pylint: disable=W0622 +from Bcfg2.Compat import xmlrpclib, walk_packages, any, all, cmp +# pylint: enable=W0622 + + +def cmpent(ent1, ent2): + """Sort entries.""" + if ent1.tag != ent2.tag: + return cmp(ent1.tag, ent2.tag) + else: + return cmp(ent1.get('name'), ent2.get('name')) + + +def matches_entry(entryspec, entry): + """ Determine if the Decisions-style entry specification matches + the entry. Both are tuples of (tag, name). The entryspec can + handle the wildcard * in either position. """ + if entryspec == entry: + return True + return all(fnmatch.fnmatch(entry[i], entryspec[i]) for i in [0, 1]) + + +def matches_white_list(entry, whitelist): + """ Return True if (, ) is in the given + whitelist. """ + return any(matches_entry(we, (entry.tag, entry.get('name'))) + for we in whitelist) + + +def passes_black_list(entry, blacklist): + """ Return True if (, ) is not in the given + blacklist. 
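The rewritten Bcfg2/Client/__init__.py introduced here adds a cmpent() comparator that orders entries by tag and then by name, using the cmp shim from Bcfg2.Compat. A hedged sketch of how such a comparator can be applied under Python 3, where list.sort() only accepts a key function; the cmp() stand-in and the sample entries are assumptions for illustration:

from functools import cmp_to_key
import lxml.etree

def cmp(a, b):
    """Python 3 has no builtin cmp(); Bcfg2.Compat supplies an equivalent."""
    return (a > b) - (a < b)

def cmpent(ent1, ent2):
    """Sort entries by tag, then by name."""
    if ent1.tag != ent2.tag:
        return cmp(ent1.tag, ent2.tag)
    return cmp(ent1.get('name'), ent2.get('name'))

# Invented sample entries:
entries = [lxml.etree.Element('Service', name='sshd'),
           lxml.etree.Element('Package', name='openssh-server'),
           lxml.etree.Element('Package', name='bcfg2')]
entries.sort(key=cmp_to_key(cmpent))
print([(e.tag, e.get('name')) for e in entries])
# [('Package', 'bcfg2'), ('Package', 'openssh-server'), ('Service', 'sshd')]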
""" + return not any(matches_entry(be, (entry.tag, entry.get('name'))) + for be in blacklist) def prompt(msg): @@ -16,14 +64,901 @@ contain "[y/N]" if desired, etc. :type msg: string :returns: bool - True if yes, False if no """ - while len(select.select([sys.stdin.fileno()], [], [], 0.0)[0]) > 0: - os.read(sys.stdin.fileno(), 4096) try: - ans = input(msg) + ans = safe_input(msg) return ans in ['y', 'Y'] except UnicodeEncodeError: - ans = input(msg.encode('utf-8')) + ans = safe_input(msg.encode('utf-8')) return ans in ['y', 'Y'] - except EOFError: - # handle ^C on rhel-based platforms + except (EOFError, KeyboardInterrupt): + # handle ^C + raise SystemExit(1) + except: + print("Error while reading input: %s" % sys.exc_info()[1]) + return False + + +class ClientDriverAction(Bcfg2.Options.ComponentAction): + """ Action to load client drivers """ + bases = ['Bcfg2.Client.Tools'] + fail_silently = True + + +class Client(object): + """ The main Bcfg2 client class """ + + options = Proxy.ComponentProxy.options + [ + Bcfg2.Options.Common.syslog, + Bcfg2.Options.Common.interactive, + Bcfg2.Options.BooleanOption( + "-q", "--quick", help="Disable some checksum verification"), + Bcfg2.Options.Option( + cf=('client', 'probe_timeout'), + type=Bcfg2.Options.Types.timeout, + help="Timeout when running client probes"), + Bcfg2.Options.Option( + "-b", "--only-bundles", default=[], + type=Bcfg2.Options.Types.colon_list, + help='Only configure the given bundle(s)'), + Bcfg2.Options.Option( + "-B", "--except-bundles", default=[], + type=Bcfg2.Options.Types.colon_list, + help='Configure everything except the given bundle(s)'), + Bcfg2.Options.ExclusiveOptionGroup( + Bcfg2.Options.BooleanOption( + "-Q", "--bundle-quick", + help='Only verify the given bundle(s)'), + Bcfg2.Options.Option( + '-r', '--remove', + choices=['all', 'services', 'packages', 'users'], + help='Force removal of additional configuration items')), + Bcfg2.Options.ExclusiveOptionGroup( + Bcfg2.Options.PathOption( + '-f', '--file', type=argparse.FileType('rb'), + help='Configure from a file rather than querying the server'), + Bcfg2.Options.PathOption( + '-c', '--cache', type=argparse.FileType('wb'), + help='Store the configuration in a file')), + Bcfg2.Options.BooleanOption( + '--exit-on-probe-failure', default=True, + cf=('client', 'exit_on_probe_failure'), + help="The client should exit if a probe fails"), + Bcfg2.Options.Option( + '-p', '--profile', cf=('client', 'profile'), + help='Assert the given profile for the host'), + Bcfg2.Options.Option( + '-l', '--decision', cf=('client', 'decision'), + choices=['whitelist', 'blacklist', 'none'], + help='Run client in server decision list mode'), + Bcfg2.Options.BooleanOption( + "-O", "--no-lock", help='Omit lock check'), + Bcfg2.Options.PathOption( + cf=('components', 'lockfile'), default='/var/lock/bcfg2.run', + help='Client lock file'), + Bcfg2.Options.BooleanOption( + "-n", "--dry-run", help='Do not actually change the system'), + Bcfg2.Options.Option( + "-D", "--drivers", cf=('client', 'drivers'), + type=Bcfg2.Options.Types.comma_list, + default=[m[1] for m in walk_packages(path=Tools.__path__)], + action=ClientDriverAction, help='Client drivers'), + Bcfg2.Options.BooleanOption( + "-e", "--show-extra", help='Enable extra entry output'), + Bcfg2.Options.BooleanOption( + "-k", "--kevlar", help='Run in bulletproof mode'), + Bcfg2.Options.BooleanOption( + "-i", "--only-important", + help='Only configure the important entries')] + + def __init__(self): + self.config = None + self._proxy = None + 
self.logger = logging.getLogger('bcfg2') + self.cmd = Executor(Bcfg2.Options.setup.probe_timeout) + self.tools = [] + self.times = dict() + self.times['initialization'] = time.time() + + if Bcfg2.Options.setup.bundle_quick: + if (not Bcfg2.Options.setup.only_bundles and + not Bcfg2.Options.setup.except_bundles): + self.logger.error("-Q option requires -b or -B") + raise SystemExit(1) + if Bcfg2.Options.setup.remove == 'services': + self.logger.error("Service removal is nonsensical; " + "removed services will only be disabled") + if not Bcfg2.Options.setup.server.startswith('https://'): + Bcfg2.Options.setup.server = \ + 'https://' + Bcfg2.Options.setup.server + + #: A dict of the state of each entry. Keys are the entries. + #: Values are boolean: True means that the entry is good, + #: False means that the entry is bad. + self.states = {} + self.whitelist = [] + self.blacklist = [] + self.removal = [] + self.unhandled = [] + self.logger = logging.getLogger(__name__) + + def _probe_failure(self, probename, msg): + """ handle failure of a probe in the way the user wants us to + (exit or continue) """ + message = "Failed to execute probe %s: %s" % (probename, msg) + if Bcfg2.Options.setup.exit_on_probe_failure: + self.fatal_error(message) + else: + self.logger.error(message) + + def run_probe(self, probe): + """Execute probe.""" + name = probe.get('name') + self.logger.info("Running probe %s" % name) + ret = XML.Element("probe-data", name=name, source=probe.get('source')) + try: + scripthandle, scriptname = tempfile.mkstemp() + if sys.hexversion >= 0x03000000: + script = os.fdopen(scripthandle, 'w', + encoding=Bcfg2.Options.setup.encoding) + else: + script = os.fdopen(scripthandle, 'w') + try: + script.write("#!%s\n" % + (probe.attrib.get('interpreter', '/bin/sh'))) + if sys.hexversion >= 0x03000000: + script.write(probe.text) + else: + script.write(probe.text.encode('utf-8')) + script.close() + os.chmod(scriptname, + stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | + stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | + stat.S_IWUSR) # 0755 + rv = self.cmd.run(scriptname) + if rv.stderr: + self.logger.warning("Probe %s has error output: %s" % + (name, rv.stderr)) + if not rv.success: + self._probe_failure(name, "Return value %s" % rv.retval) + self.logger.info("Probe %s has result:" % name) + self.logger.info(rv.stdout) + if sys.hexversion >= 0x03000000: + ret.text = rv.stdout + else: + ret.text = rv.stdout.decode('utf-8') + finally: + os.unlink(scriptname) + except SystemExit: + raise + except: + self._probe_failure(name, sys.exc_info()[1]) + return ret + + def fatal_error(self, message): + """Signal a fatal error.""" + self.logger.error("Fatal error: %s" % (message)) raise SystemExit(1) + + @property + def proxy(self): + """ get an XML-RPC proxy to the server """ + if self._proxy is None: + self._proxy = Proxy.ComponentProxy() + return self._proxy + + def run_probes(self): + """ run probes and upload probe data """ + try: + probes = XML.XML(str(self.proxy.GetProbes())) + except (Proxy.ProxyError, + Proxy.CertificateError, + socket.gaierror, + socket.error): + err = sys.exc_info()[1] + self.fatal_error("Failed to download probes from bcfg2: %s" % err) + except XML.ParseError: + err = sys.exc_info()[1] + self.fatal_error("Server returned invalid probe requests: %s" % + err) + + self.times['probe_download'] = time.time() + + # execute probes + probedata = XML.Element("ProbeData") + for probe in probes.findall(".//probe"): + probedata.append(self.run_probe(probe)) + + if len(probes.findall(".//probe")) > 
0: + try: + # upload probe responses + self.proxy.RecvProbeData( + XML.tostring(probedata, + xml_declaration=False).decode('utf-8')) + except Proxy.ProxyError: + err = sys.exc_info()[1] + self.fatal_error("Failed to upload probe data: %s" % err) + + self.times['probe_upload'] = time.time() + + def get_config(self): + """ load the configuration, either from the cached + configuration file (-f), or from the server """ + if Bcfg2.Options.setup.file: + # read config from file + try: + self.logger.debug("Reading cached configuration from %s" % + Bcfg2.Options.setup.file.name) + return Bcfg2.Options.setup.file.read() + except IOError: + self.fatal_error("Failed to read cached configuration from: %s" + % Bcfg2.Options.setup.file.name) + else: + # retrieve config from server + if Bcfg2.Options.setup.profile: + try: + self.proxy.AssertProfile(Bcfg2.Options.setup.profile) + except Proxy.ProxyError: + err = sys.exc_info()[1] + self.fatal_error("Failed to set client profile: %s" % err) + + try: + self.proxy.DeclareVersion(__version__) + except (xmlrpclib.Fault, + Proxy.ProxyError, + Proxy.CertificateError, + socket.gaierror, + socket.error): + err = sys.exc_info()[1] + self.fatal_error("Failed to declare version: %s" % err) + + self.run_probes() + + if Bcfg2.Options.setup.decision in ['whitelist', 'blacklist']: + try: + # TODO: read decision list from --decision-list + Bcfg2.Options.setup.decision_list = \ + self.proxy.GetDecisionList( + Bcfg2.Options.setup.decision) + self.logger.info("Got decision list from server:") + self.logger.info(Bcfg2.Options.setup.decision_list) + except Proxy.ProxyError: + err = sys.exc_info()[1] + self.fatal_error("Failed to get decision list: %s" % err) + + try: + rawconfig = self.proxy.GetConfig().encode('utf-8') + except Proxy.ProxyError: + err = sys.exc_info()[1] + self.fatal_error("Failed to download configuration from " + "Bcfg2: %s" % err) + + self.times['config_download'] = time.time() + + if Bcfg2.Options.setup.cache: + try: + Bcfg2.Options.setup.cache.write(rawconfig) + os.chmod(Bcfg2.Options.setup.cache.name, 384) # 0600 + except IOError: + self.logger.warning("Failed to write config cache file %s" % + (Bcfg2.Options.setup.cache)) + self.times['caching'] = time.time() + + return rawconfig + + def parse_config(self, rawconfig): + """ Parse the XML configuration received from the Bcfg2 server """ + try: + self.config = XML.XML(rawconfig) + except XML.ParseError: + syntax_error = sys.exc_info()[1] + self.fatal_error("The configuration could not be parsed: %s" % + syntax_error) + + self.load_tools() + + # find entries not handled by any tools + self.unhandled = [entry for struct in self.config + for entry in struct + if entry not in self.handled] + + if self.unhandled: + self.logger.error("The following entries are not handled by any " + "tool:") + for entry in self.unhandled: + self.logger.error("%s:%s:%s" % (entry.tag, entry.get('type'), + entry.get('name'))) + + # find duplicates + self.find_dups(self.config) + + pkgs = [(entry.get('name'), entry.get('origin')) + for struct in self.config + for entry in struct + if entry.tag == 'Package'] + if pkgs: + self.logger.debug("The following packages are specified in bcfg2:") + self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None]) + self.logger.debug("The following packages are prereqs added by " + "Packages:") + self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages']) + + self.times['config_parse'] = time.time() + + def run(self): + """Perform client execution phase.""" + # begin configuration + 
self.times['start'] = time.time() + + self.logger.info("Starting Bcfg2 client run at %s" % + self.times['start']) + + self.parse_config(self.get_config().decode('utf-8')) + + if self.config.tag == 'error': + self.fatal_error("Server error: %s" % (self.config.text)) + + if Bcfg2.Options.setup.bundle_quick: + newconfig = XML.XML('') + for bundle in self.config.getchildren(): + name = bundle.get("name") + if (name and (name in Bcfg2.Options.setup.only_bundles or + name not in Bcfg2.Options.setup.except_bundles)): + newconfig.append(bundle) + self.config = newconfig + + if not Bcfg2.Options.setup.no_lock: + # check lock here + try: + lockfile = open(Bcfg2.Options.setup.lockfile, 'w') + if locked(lockfile.fileno()): + self.fatal_error("Another instance of Bcfg2 is running. " + "If you want to bypass the check, run " + "with the -O/--no-lock option") + except SystemExit: + raise + except: + lockfile = None + self.logger.error("Failed to open lockfile %s: %s" % + (Bcfg2.Options.setup.lockfile, + sys.exc_info()[1])) + + # execute the configuration + self.Execute() + + if not Bcfg2.Options.setup.no_lock: + # unlock here + if lockfile: + try: + fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN) + os.remove(Bcfg2.Options.setup.lockfile) + except OSError: + self.logger.error("Failed to unlock lockfile %s" % + lockfile.name) + + if (not Bcfg2.Options.setup.file and + not Bcfg2.Options.setup.bundle_quick): + # upload statistics + feedback = self.GenerateStats() + + try: + self.proxy.RecvStats( + XML.tostring(feedback, + xml_declaration=False).decode('utf-8')) + except Proxy.ProxyError: + err = sys.exc_info()[1] + self.logger.error("Failed to upload configuration statistics: " + "%s" % err) + raise SystemExit(2) + + self.logger.info("Finished Bcfg2 client run at %s" % time.time()) + + def load_tools(self): + """ Load all applicable client tools """ + for tool in Bcfg2.Options.setup.drivers: + try: + self.tools.append(tool(self.config)) + except Tools.ToolInstantiationError: + continue + except: + self.logger.error("Failed to instantiate tool %s" % tool, + exc_info=1) + + for tool in self.tools[:]: + for conflict in getattr(tool, 'conflicts', []): + for item in self.tools: + if item.name == conflict: + self.tools.remove(item) + + self.logger.info("Loaded tool drivers:") + self.logger.info([tool.name for tool in self.tools]) + + deprecated = [tool.name for tool in self.tools if tool.deprecated] + if deprecated: + self.logger.warning("Loaded deprecated tool drivers:") + self.logger.warning(deprecated) + experimental = [tool.name for tool in self.tools if tool.experimental] + if experimental: + self.logger.warning("Loaded experimental tool drivers:") + self.logger.warning(experimental) + + def find_dups(self, config): + """ Find duplicate entries and warn about them """ + entries = dict() + for struct in config: + for entry in struct: + for tool in self.tools: + if tool.handlesEntry(entry): + pkey = tool.primarykey(entry) + if pkey in entries: + entries[pkey] += 1 + else: + entries[pkey] = 1 + multi = [e for e, c in entries.items() if c > 1] + if multi: + self.logger.debug("The following entries are included multiple " + "times:") + for entry in multi: + self.logger.debug(entry) + + def promptFilter(self, msg, entries): + """Filter a supplied list based on user input.""" + ret = [] + entries.sort(key=lambda e: e.tag + ":" + e.get('name')) + for entry in entries[:]: + if entry in self.unhandled: + # don't prompt for entries that can't be installed + continue + if 'qtext' in entry.attrib: + iprompt = 
entry.get('qtext') + else: + iprompt = msg % (entry.tag, entry.get('name')) + if prompt(iprompt): + ret.append(entry) + return ret + + def __getattr__(self, name): + if name in ['extra', 'handled', 'modified', '__important__']: + ret = [] + for tool in self.tools: + ret += getattr(tool, name) + return ret + elif name in self.__dict__: + return self.__dict__[name] + raise AttributeError(name) + + def InstallImportant(self): + """Install important entries + + We also process the decision mode stuff here because we want to prevent + non-whitelisted/blacklisted 'important' entries from being installed + prior to determining the decision mode on the client. + """ + # Need to process decision stuff early so that dryrun mode + # works with it + self.whitelist = [entry for entry in self.states + if not self.states[entry]] + if not Bcfg2.Options.setup.file: + if Bcfg2.Options.setup.decision == 'whitelist': + dwl = Bcfg2.Options.setup.decision_list + w_to_rem = [e for e in self.whitelist + if not matches_white_list(e, dwl)] + if w_to_rem: + self.logger.info("In whitelist mode: " + "suppressing installation of:") + self.logger.info(["%s:%s" % (e.tag, e.get('name')) + for e in w_to_rem]) + self.whitelist = [x for x in self.whitelist + if x not in w_to_rem] + elif Bcfg2.Options.setup.decision == 'blacklist': + b_to_rem = \ + [e for e in self.whitelist + if not + passes_black_list(e, Bcfg2.Options.setup.decision_list)] + if b_to_rem: + self.logger.info("In blacklist mode: " + "suppressing installation of:") + self.logger.info(["%s:%s" % (e.tag, e.get('name')) + for e in b_to_rem]) + self.whitelist = [x for x in self.whitelist + if x not in b_to_rem] + + # take care of important entries first + if (not Bcfg2.Options.setup.dry_run or + Bcfg2.Options.setup.only_important): + important_installs = set() + for parent in self.config.findall(".//Path/.."): + name = parent.get("name") + if not name or (name in Bcfg2.Options.setup.except_bundles and + name not in Bcfg2.Options.setup.only_bundles): + continue + for cfile in parent.findall("./Path"): + if (cfile.get('name') not in self.__important__ or + cfile.get('type') != 'file' or + cfile not in self.whitelist): + continue + tools = [t for t in self.tools + if t.handlesEntry(cfile) and t.canVerify(cfile)] + if not tools: + continue + if Bcfg2.Options.setup.dry_run: + important_installs.add(cfile) + continue + if (Bcfg2.Options.setup.interactive and not + self.promptFilter("Install %s: %s? 
(y/N):", + [cfile])): + self.whitelist.remove(cfile) + continue + try: + self.states[cfile] = tools[0].InstallPath(cfile) + if self.states[cfile]: + tools[0].modified.append(cfile) + except: # pylint: disable=W0702 + self.logger.error("Unexpected tool failure", + exc_info=1) + cfile.set('qtext', '') + if tools[0].VerifyPath(cfile, []): + self.whitelist.remove(cfile) + if Bcfg2.Options.setup.dry_run and len(important_installs) > 0: + self.logger.info("In dryrun mode: " + "suppressing entry installation for:") + self.logger.info(["%s:%s" % (e.tag, e.get('name')) + for e in important_installs]) + + def Inventory(self): + """ + Verify all entries, + find extra entries, + and build up workqueues + + """ + # initialize all states + for struct in self.config.getchildren(): + for entry in struct.getchildren(): + self.states[entry] = False + for tool in self.tools: + try: + self.states.update(tool.Inventory()) + except KeyboardInterrupt: + raise + except: # pylint: disable=W0702 + self.logger.error("%s.Inventory() call failed:" % tool.name, + exc_info=1) + + def Decide(self): # pylint: disable=R0912 + """Set self.whitelist based on user interaction.""" + iprompt = "Install %s: %s? (y/N): " + rprompt = "Remove %s: %s? (y/N): " + if Bcfg2.Options.setup.remove: + if Bcfg2.Options.setup.remove == 'all': + self.removal = self.extra + elif Bcfg2.Options.setup.remove == 'services': + self.removal = [entry for entry in self.extra + if entry.tag == 'Service'] + elif Bcfg2.Options.setup.remove == 'packages': + self.removal = [entry for entry in self.extra + if entry.tag == 'Package'] + elif Bcfg2.Options.setup.remove == 'users': + self.removal = [entry for entry in self.extra + if entry.tag in ['POSIXUser', 'POSIXGroup']] + + candidates = [entry for entry in self.states + if not self.states[entry]] + + if Bcfg2.Options.setup.dry_run: + if self.whitelist: + self.logger.info("In dryrun mode: " + "suppressing entry installation for:") + self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) + for entry in self.whitelist]) + self.whitelist = [] + if self.removal: + self.logger.info("In dryrun mode: " + "suppressing entry removal for:") + self.logger.info(["%s:%s" % (entry.tag, entry.get('name')) + for entry in self.removal]) + self.removal = [] + + # Here is where most of the work goes + # first perform bundle filtering + all_bundle_names = [b.get('name') + for b in self.config.findall('./Bundle')] + bundles = self.config.getchildren() + if Bcfg2.Options.setup.only_bundles: + # warn if non-existent bundle given + for bundle in Bcfg2.Options.setup.only_bundles: + if bundle not in all_bundle_names: + self.logger.info("Warning: Bundle %s not found" % bundle) + bundles = [b for b in bundles + if b.get('name') in Bcfg2.Options.setup.only_bundles] + if Bcfg2.Options.setup.except_bundles: + # warn if non-existent bundle given + if not Bcfg2.Options.setup.bundle_quick: + for bundle in Bcfg2.Options.setup.except_bundles: + if bundle not in all_bundle_names: + self.logger.info("Warning: Bundle %s not found" % + bundle) + bundles = [ + b for b in bundles + if b.get('name') not in Bcfg2.Options.setup.except_bundles] + self.whitelist = [e for e in self.whitelist + if any(e in b for b in bundles)] + + # first process prereq actions + for bundle in bundles[:]: + if bundle.tag == 'Bundle': + bmodified = any((item in self.whitelist or + item in self.modified) for item in bundle) + else: + bmodified = False + actions = [a for a in bundle.findall('./Action') + if (a.get('timing') in ['pre', 'both'] and + (bmodified or 
a.get('when') == 'always'))] + # now we process all "pre" and "both" actions that are either + # always or the bundle has been modified + if Bcfg2.Options.setup.interactive: + self.promptFilter(iprompt, actions) + self.DispatchInstallCalls(actions) + + if bundle.tag != 'Bundle': + continue + + # need to test to fail entries in whitelist + if not all(self.states[a] for a in actions): + # then display bundles forced off with entries + self.logger.info("%s %s failed prerequisite action" % + (bundle.tag, bundle.get('name'))) + bundles.remove(bundle) + b_to_remv = [ent for ent in self.whitelist if ent in bundle] + if b_to_remv: + self.logger.info("Not installing entries from %s %s" % + (bundle.tag, bundle.get('name'))) + self.logger.info(["%s:%s" % (e.tag, e.get('name')) + for e in b_to_remv]) + for ent in b_to_remv: + self.whitelist.remove(ent) + + self.logger.debug("Installing entries in the following bundle(s):") + self.logger.debug(" %s" % ", ".join(b.get("name") for b in bundles + if b.get("name"))) + + if Bcfg2.Options.setup.interactive: + self.whitelist = self.promptFilter(iprompt, self.whitelist) + self.removal = self.promptFilter(rprompt, self.removal) + + for entry in candidates: + if entry not in self.whitelist: + self.blacklist.append(entry) + + def DispatchInstallCalls(self, entries): + """Dispatch install calls to underlying tools.""" + for tool in self.tools: + handled = [entry for entry in entries if tool.canInstall(entry)] + if not handled: + continue + try: + self.states.update(tool.Install(handled)) + except KeyboardInterrupt: + raise + except: # pylint: disable=W0702 + self.logger.error("%s.Install() call failed:" % tool.name, + exc_info=1) + + def Install(self): + """Install all entries.""" + self.DispatchInstallCalls(self.whitelist) + mods = self.modified + mbundles = [struct for struct in self.config.findall('Bundle') + if any(True for mod in mods if mod in struct)] + + if self.modified: + # Handle Bundle interdeps + if mbundles: + self.logger.info("The Following Bundles have been modified:") + self.logger.info([mbun.get('name') for mbun in mbundles]) + tbm = [(t, b) for t in self.tools for b in mbundles] + for tool, bundle in tbm: + try: + self.states.update(tool.Inventory(structures=[bundle])) + except KeyboardInterrupt: + raise + except: # pylint: disable=W0702 + self.logger.error("%s.Inventory() call failed:" % + tool.name, + exc_info=1) + clobbered = [entry for bundle in mbundles for entry in bundle + if (not self.states[entry] and + entry not in self.blacklist)] + if clobbered: + self.logger.debug("Found clobbered entries:") + self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) + for entry in clobbered]) + if not Bcfg2.Options.setup.interactive: + self.DispatchInstallCalls(clobbered) + + all_bundles = self.config.findall('./Bundle') + mbundles.extend(self._get_all_modified_bundles(mbundles, all_bundles)) + + for bundle in all_bundles: + if (Bcfg2.Options.setup.only_bundles and + bundle.get('name') not in + Bcfg2.Options.setup.only_bundles): + # prune out unspecified bundles when running with -b + continue + if bundle in mbundles: + continue + + self.logger.debug("Bundle %s was not modified" % + bundle.get('name')) + for tool in self.tools: + try: + self.states.update(tool.BundleNotUpdated(bundle)) + except KeyboardInterrupt: + raise + except: # pylint: disable=W0702 + self.logger.error('%s.BundleNotUpdated(%s:%s) call failed:' + % (tool.name, bundle.tag, + bundle.get('name')), exc_info=1) + + for indep in self.config.findall('.//Independent'): + for tool in 
self.tools: + try: + self.states.update(tool.BundleNotUpdated(indep)) + except KeyboardInterrupt: + raise + except: # pylint: disable=W0702 + self.logger.error("%s.BundleNotUpdated(%s:%s) call failed:" + % (tool.name, indep.tag, + indep.get("name")), exc_info=1) + + def _get_all_modified_bundles(self, mbundles, all_bundles): + """This gets all modified bundles by calling BundleUpdated until no + new bundles get added to the modification list.""" + new_mbundles = mbundles + add_mbundles = [] + + while new_mbundles: + for bundle in self.config.findall('./Bundle'): + if (Bcfg2.Options.setup.only_bundles and + bundle.get('name') not in + Bcfg2.Options.setup.only_bundles): + # prune out unspecified bundles when running with -b + continue + if bundle not in new_mbundles: + continue + + self.logger.debug('Bundle %s was modified' % + bundle.get('name')) + for tool in self.tools: + try: + self.states.update(tool.BundleUpdated(bundle)) + except: # pylint: disable=W0702 + self.logger.error('%s.BundleUpdated(%s:%s) call ' + 'failed:' % (tool.name, bundle.tag, + bundle.get("name")), + exc_info=1) + + mods = self.modified + new_mbundles = [struct for struct in all_bundles + if any(True for mod in mods if mod in struct) and + struct not in mbundles + add_mbundles] + add_mbundles.extend(new_mbundles) + + return add_mbundles + + def Remove(self): + """Remove extra entries.""" + for tool in self.tools: + extras = [entry for entry in self.removal + if tool.handlesEntry(entry)] + if extras: + try: + tool.Remove(extras) + except: # pylint: disable=W0702 + self.logger.error("%s.Remove() failed" % tool.name, + exc_info=1) + + def CondDisplayState(self, phase): + """Conditionally print tracing information.""" + self.logger.info('Phase: %s' % phase) + self.logger.info('Correct entries: %d' % + list(self.states.values()).count(True)) + self.logger.info('Incorrect entries: %d' % + list(self.states.values()).count(False)) + if phase == 'final' and list(self.states.values()).count(False): + for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" + + e.get('name')): + if not self.states[entry]: + etype = entry.get('type') + if etype: + self.logger.info("%s:%s:%s" % (entry.tag, etype, + entry.get('name'))) + else: + self.logger.info("%s:%s" % (entry.tag, + entry.get('name'))) + self.logger.info('Total managed entries: %d' % + len(list(self.states.values()))) + self.logger.info('Unmanaged entries: %d' % len(self.extra)) + if phase == 'final' and Bcfg2.Options.setup.show_extra: + for entry in sorted(self.extra, + key=lambda e: e.tag + ":" + e.get('name')): + etype = entry.get('type') + if etype: + self.logger.info("%s:%s:%s" % (entry.tag, etype, + entry.get('name'))) + else: + self.logger.info("%s:%s" % (entry.tag, + entry.get('name'))) + + if ((list(self.states.values()).count(False) == 0) and not self.extra): + self.logger.info('All entries correct.') + + def ReInventory(self): + """Recheck everything.""" + if not Bcfg2.Options.setup.dry_run and Bcfg2.Options.setup.kevlar: + self.logger.info("Rechecking system inventory") + self.Inventory() + + def Execute(self): + """Run all methods.""" + self.Inventory() + self.times['inventory'] = time.time() + self.CondDisplayState('initial') + self.InstallImportant() + if not Bcfg2.Options.setup.only_important: + self.Decide() + self.Install() + self.times['install'] = time.time() + self.Remove() + self.times['remove'] = time.time() + + if self.modified: + self.ReInventory() + self.times['reinventory'] = time.time() + self.times['finished'] = time.time() + 
self.CondDisplayState('final') + + def GenerateStats(self): + """Generate XML summary of execution statistics.""" + states = {} + for (item, val) in list(self.states.items()): + if not Bcfg2.Options.setup.only_important or \ + item.get('important', 'false').lower() == 'true': + states[item] = val + + feedback = XML.Element("upload-statistics") + stats = XML.SubElement(feedback, + 'Statistics', total=str(len(states)), + version='2.0', + revision=self.config.get('revision', '-1')) + flags = XML.SubElement(stats, "Flags") + XML.SubElement(flags, "Flag", name="dry_run", + value=str(Bcfg2.Options.setup.dry_run)) + XML.SubElement(flags, "Flag", name="only_important", + value=str(Bcfg2.Options.setup.only_important)) + good_entries = [key for key, val in list(states.items()) if val] + good = len(good_entries) + stats.set('good', str(good)) + if any(not val for val in list(states.values())): + stats.set('state', 'dirty') + else: + stats.set('state', 'clean') + + # List bad elements of the configuration + for (data, ename) in [(self.modified, 'Modified'), + (self.extra, "Extra"), + (good_entries, "Good"), + ([entry for entry in states + if not states[entry]], "Bad")]: + container = XML.SubElement(stats, ename) + for item in data: + new_item = copy.deepcopy(item) + new_item.set('qtext', '') + container.append(new_item) + new_item.text = None + + timeinfo = XML.Element("OpStamps") + feedback.append(stats) + for (event, timestamp) in list(self.times.items()): + timeinfo.set(event, str(timestamp)) + stats.append(timeinfo) + return feedback diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Proxy.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Proxy.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Proxy.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Proxy.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,347 @@ +import os.path +import re +import sys +import time +import socket +import logging +import Bcfg2.Options +from Bcfg2.Compat import httplib, xmlrpclib, urlparse, quote_plus + +# The ssl module is provided by either Python 2.6 or a separate ssl +# package that works on older versions of Python (see +# http://pypi.python.org/pypi/ssl). If neither can be found, look for +# M2Crypto instead. 
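Before the diff moves on to the new Proxy module: the rewritten GenerateStats() shown above assembles the statistics document that the client uploads via RecvStats(), now including a Flags element recording dry_run and only_important. A rough sketch of the resulting XML shape, built here with plain lxml.etree rather than the Bcfg2.Client.XML wrapper; the attribute values, timestamps and the single Bad entry are illustrative only:

import lxml.etree as etree

feedback = etree.Element('upload-statistics')
stats = etree.SubElement(feedback, 'Statistics', total='42', version='2.0',
                         revision='-1', good='41', state='dirty')
flags = etree.SubElement(stats, 'Flags')
etree.SubElement(flags, 'Flag', name='dry_run', value='False')
etree.SubElement(flags, 'Flag', name='only_important', value='False')
# One container per category; a single invented Bad entry for illustration.
for ename in ('Modified', 'Extra', 'Good', 'Bad'):
    etree.SubElement(stats, ename)
etree.SubElement(stats.find('Bad'), 'Path', name='/etc/motd', type='file')
etree.SubElement(stats, 'OpStamps', start='1484679222.0',
                 finished='1484679230.5')
print(etree.tostring(feedback, pretty_print=True).decode())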
+try: + import ssl + SSL_ERROR = ssl.SSLError +except ImportError: + raise Exception("No SSL module support") + + +version = sys.version_info[:2] +has_py26 = version >= (2, 6) +has_py32 = version >= (3, 2) + +__all__ = ["ComponentProxy", + "RetryMethod", + "SSLHTTPConnection", + "XMLRPCTransport"] + + +class ProxyError(Exception): + """ ProxyError provides a consistent reporting interface to + the various xmlrpclib errors that might arise (mainly + ProtocolError and Fault) """ + def __init__(self, err): + msg = None + if isinstance(err, xmlrpclib.ProtocolError): + # cut out the password in the URL + url = re.sub(r'([^:]+):(.*?)@([^@]+:\d+/)', r'\1:******@\3', + err.url) + msg = "XML-RPC Protocol Error for %s: %s (%s)" % (url, + err.errmsg, + err.errcode) + elif isinstance(err, xmlrpclib.Fault): + msg = "XML-RPC Fault: %s (%s)" % (err.faultString, + err.faultCode) + else: + msg = str(err) + Exception.__init__(self, msg) + + +class CertificateError(Exception): + def __init__(self, commonName): + self.commonName = commonName + + def __str__(self): + return ("Got unallowed commonName %s from server" + % self.commonName) + + +_orig_Method = xmlrpclib._Method + + +class RetryMethod(xmlrpclib._Method): + """Method with error handling and retries built in.""" + log = logging.getLogger('xmlrpc') + max_retries = 3 + retry_delay = 1 + + def __call__(self, *args): + for retry in range(self.max_retries): + if retry >= self.max_retries - 1: + final = True + else: + final = False + msg = None + try: + return _orig_Method.__call__(self, *args) + except xmlrpclib.ProtocolError: + err = sys.exc_info()[1] + msg = "Server failure: Protocol Error: %s %s" % \ + (err.errcode, err.errmsg) + except xmlrpclib.Fault: + msg = sys.exc_info()[1] + except socket.error: + err = sys.exc_info()[1] + if hasattr(err, 'errno') and err.errno == 336265218: + msg = "SSL Key error: %s" % err + elif hasattr(err, 'errno') and err.errno == 185090050: + msg = "SSL CA error: %s" % err + elif final: + msg = "Server failure: %s" % err + except CertificateError: + err = sys.exc_info()[1] + msg = "Got unallowed commonName %s from server" % \ + err.commonName + except KeyError: + err = sys.exc_info()[1] + msg = "Server disallowed connection: %s" % err + except ProxyError: + err = sys.exc_info()[1] + msg = err + except: + etype, err = sys.exc_info()[:2] + msg = "Unknown failure: %s (%s)" % (err, etype.__name__) + if msg: + if final: + self.log.error(msg) + raise ProxyError(msg) + else: + self.log.info(msg) + time.sleep(self.retry_delay) + +xmlrpclib._Method = RetryMethod + + +class SSLHTTPConnection(httplib.HTTPConnection): + """Extension of HTTPConnection that + implements SSL and related behaviors. + """ + + def __init__(self, host, port=None, strict=None, timeout=90, key=None, + cert=None, ca=None, scns=None, protocol='xmlrpc/tlsv1'): + """Initializes the `httplib.HTTPConnection` object and stores security + parameters + + Parameters + ---------- + host : string + Name of host to contact + port : int, optional + Port on which to contact the host. If none is specified, + the default port of 80 will be used unless the `host` + string has a port embedded in the form host:port. + strict : Boolean, optional + Passed to the `httplib.HTTPConnection` constructor and if + True, causes the `BadStatusLine` exception to be raised if + the status line cannot be parsed as a valid HTTP 1.0 or + 1.1 status. + timeout : int, optional + Causes blocking operations to timeout after `timeout` + seconds. 
+ key : string, optional + The file system path to the local endpoint's SSL key. May + specify the same file as `cert` if using a file that + contains both. See + http://docs.python.org/library/ssl.html#ssl-certificates + for details. Required if using client certificate + authentication. + cert : string, optional + The file system path to the local endpoint's SSL + certificate. May specify the same file as `key` if using + a file that contains both. See + http://docs.python.org/library/ssl.html#ssl-certificates + for details. Required if using client certificate + authentication. + ca : string, optional + The file system path to a set of concatenated certificate + authority certs, which are used to validate certificates + passed from the other end of the connection. + scns : array-like, optional + List of acceptable server commonNames. The peer cert's + common name must appear in this list, otherwise the + connect() call will throw a `CertificateError`. + protocol : {'xmlrpc/ssl', 'xmlrpc/tlsv1'}, optional + Communication protocol to use. + + """ + if not has_py26: + httplib.HTTPConnection.__init__(self, host, port, strict) + elif not has_py32: + httplib.HTTPConnection.__init__(self, host, port, strict, timeout) + else: + # the strict parameter is deprecated. + # HTTP 0.9-style "Simple Responses" are not supported anymore. + httplib.HTTPConnection.__init__(self, host, port, timeout=timeout) + self.logger = logging.getLogger("%s.%s" % (self.__class__.__module__, + self.__class__.__name__)) + self.key = key + self.cert = cert + self.ca = ca + self.scns = scns + self.protocol = protocol + self.timeout = timeout + + def connect(self): + """Initiates a connection using the ssl module.""" + # check for IPv6 + hostip = socket.getaddrinfo(self.host, + self.port, + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0][4][0] + if ':' in hostip: + rawsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + else: + rawsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.protocol == 'xmlrpc/ssl': + ssl_protocol_ver = ssl.PROTOCOL_SSLv23 + elif self.protocol == 'xmlrpc/tlsv1': + ssl_protocol_ver = ssl.PROTOCOL_TLSv1 + else: + self.logger.error("Unknown protocol %s" % (self.protocol)) + raise Exception("unknown protocol %s" % self.protocol) + if self.ca: + other_side_required = ssl.CERT_REQUIRED + if not os.path.isfile(self.ca): + self.logger.error("CA specified but none found at %s" % self.ca) + else: + other_side_required = ssl.CERT_NONE + self.logger.warning("No ca is specified. Cannot authenticate the " + "server with SSL.") + if self.cert and not self.key: + self.logger.warning("SSL cert specified, but no key. Cannot " + "authenticate this client with SSL.") + self.cert = None + if self.key and not self.cert: + self.logger.warning("SSL key specified, but no cert. 
Cannot " + "authenticate this client with SSL.") + self.key = None + + rawsock.settimeout(self.timeout) + self.sock = ssl.SSLSocket(rawsock, cert_reqs=other_side_required, + ca_certs=self.ca, suppress_ragged_eofs=True, + keyfile=self.key, certfile=self.cert, + ssl_version=ssl_protocol_ver) + self.sock.connect((self.host, self.port)) + peer_cert = self.sock.getpeercert() + if peer_cert and self.scns: + scn = [x[0][1] for x in peer_cert['subject'] + if x[0][0] == 'commonName'][0] + if scn not in self.scns: + raise CertificateError(scn) + self.sock.closeSocket = True + + +class XMLRPCTransport(xmlrpclib.Transport): + def __init__(self, key=None, cert=None, ca=None, + scns=None, use_datetime=0, timeout=90, + protocol='xmlrpc/tlsv1'): + if hasattr(xmlrpclib.Transport, '__init__'): + xmlrpclib.Transport.__init__(self, use_datetime) + self.key = key + self.cert = cert + self.ca = ca + self.scns = scns + self.timeout = timeout + self.protocol = protocol + + def make_connection(self, host): + host, self._extra_headers = self.get_host_info(host)[0:2] + return SSLHTTPConnection(host, + key=self.key, + cert=self.cert, + ca=self.ca, + scns=self.scns, + timeout=self.timeout, + protocol=self.protocol) + + def request(self, host, handler, request_body, verbose=0): + """Send request to server and return response.""" + try: + conn = self.send_request(host, handler, request_body, False) + response = conn.getresponse() + errcode = response.status + errmsg = response.reason + headers = response.msg + except (socket.error, SSL_ERROR, httplib.BadStatusLine): + err = sys.exc_info()[1] + raise ProxyError(xmlrpclib.ProtocolError(host + handler, + 408, + str(err), + self._extra_headers)) + + if errcode != 200: + raise ProxyError(xmlrpclib.ProtocolError(host + handler, + errcode, + errmsg, + headers)) + + self.verbose = verbose + return self.parse_response(response) + + if sys.hexversion < 0x03000000: + # pylint: disable=E1101 + def send_request(self, host, handler, request_body, debug): + """ send_request() changed significantly in py3k.""" + conn = self.make_connection(host) + xmlrpclib.Transport.send_request(self, conn, handler, request_body) + self.send_host(conn, host) + self.send_user_agent(conn) + self.send_content(conn, request_body) + return conn + # pylint: enable=E1101 + + +class ComponentProxy(xmlrpclib.ServerProxy): + """Constructs proxies to components. 
""" + + options = [ + Bcfg2.Options.Common.location, Bcfg2.Options.Common.ssl_ca, + Bcfg2.Options.Common.password, Bcfg2.Options.Common.client_timeout, + Bcfg2.Options.Common.protocol, + Bcfg2.Options.PathOption( + '--ssl-key', cf=('communication', 'key'), dest="key", + help='Path to SSL key'), + Bcfg2.Options.PathOption( + cf=('communication', 'certificate'), dest="cert", + help='Path to SSL certificate'), + Bcfg2.Options.Option( + "-u", "--user", default="root", cf=('communication', 'user'), + help='The user to provide for authentication'), + Bcfg2.Options.Option( + "-R", "--retries", type=int, default=3, + cf=('communication', 'retries'), + help='The number of times to retry network communication'), + Bcfg2.Options.Option( + "-y", "--retry-delay", type=int, default=1, + cf=('communication', 'retry_delay'), + help='The time in seconds to wait between retries'), + Bcfg2.Options.Option( + '--ssl-cns', cf=('communication', 'serverCommonNames'), + dest="ssl_cns", + type=Bcfg2.Options.Types.colon_list, + help='List of server commonNames')] + + def __init__(self): + RetryMethod.max_retries = Bcfg2.Options.setup.retries + RetryMethod.retry_delay = Bcfg2.Options.setup.retry_delay + + if Bcfg2.Options.setup.user and Bcfg2.Options.setup.password: + method, path = urlparse(Bcfg2.Options.setup.server)[:2] + url = "%s://%s:%s@%s" % ( + method, + quote_plus(Bcfg2.Options.setup.user, ''), + quote_plus(Bcfg2.Options.setup.password, ''), + path) + else: + url = Bcfg2.Options.setup.server + ssl_trans = XMLRPCTransport( + key=Bcfg2.Options.setup.key, + cert=Bcfg2.Options.setup.cert, + ca=Bcfg2.Options.setup.ca, + scns=Bcfg2.Options.setup.ssl_cns, + timeout=Bcfg2.Options.setup.client_timeout, + protocol=Bcfg2.Options.setup.protocol) + xmlrpclib.ServerProxy.__init__(self, url, + allow_none=True, transport=ssl_trans) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Action.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Action.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Action.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Action.py 2017-01-10 19:18:17.000000000 +0000 @@ -1,34 +1,14 @@ """Action driver""" -import os -import sys -import select import Bcfg2.Client.Tools -from Bcfg2.Client.Frame import matches_white_list, passes_black_list -from Bcfg2.Compat import input # pylint: disable=W0622 +from Bcfg2.Utils import safe_input class Action(Bcfg2.Client.Tools.Tool): """Implement Actions""" name = 'Action' - __handles__ = [('PostInstall', None), ('Action', None)] - __req__ = {'PostInstall': ['name'], - 'Action': ['name', 'timing', 'when', 'command', 'status']} - - def _action_allowed(self, action): - """ Return true if the given action is allowed to be run by - the whitelist or blacklist """ - if self.setup['decision'] == 'whitelist' and \ - not matches_white_list(action, self.setup['decision_list']): - self.logger.info("In whitelist mode: suppressing Action: %s" % - action.get('name')) - return False - if self.setup['decision'] == 'blacklist' and \ - not passes_black_list(action, self.setup['decision_list']): - self.logger.info("In blacklist mode: suppressing Action: %s" % - action.get('name')) - return False - return True + __handles__ = [('Action', None)] + __req__ = {'Action': ['name', 'timing', 'when', 'command', 'status']} def RunAction(self, entry): """This method handles command execution and status return.""" @@ -38,19 +18,15 @@ shell = True shell_string = '(in shell) ' - if not self.setup['dryrun']: - if 
self.setup['interactive']: + if not Bcfg2.Options.setup.dry_run: + if Bcfg2.Options.setup.interactive: prompt = ('Run Action %s%s, %s: (y/N): ' % (shell_string, entry.get('name'), entry.get('command'))) - # flush input buffer - while len(select.select([sys.stdin.fileno()], [], [], - 0.0)[0]) > 0: - os.read(sys.stdin.fileno(), 4096) - ans = input(prompt) + ans = safe_input(prompt) if ans not in ['y', 'Y']: return False - if self.setup['servicemode'] == 'build': + if Bcfg2.Options.setup.service_mode == 'build': if entry.get('build', 'true') == 'false': self.logger.debug("Action: Deferring execution of %s due " "to build mode" % entry.get('command')) @@ -71,39 +47,29 @@ """Actions always verify true.""" return True - def VerifyPostInstall(self, dummy, _): - """Actions always verify true.""" - return True - def InstallAction(self, entry): """Run actions as pre-checks for bundle installation.""" if entry.get('timing') != 'post': return self.RunAction(entry) return True - def InstallPostInstall(self, entry): - """ Install a deprecated PostInstall entry """ - self.logger.warning("Installing deprecated PostInstall entry %s" % - entry.get("name")) - return self.InstallAction(entry) - - def BundleUpdated(self, bundle, states): + def BundleUpdated(self, bundle): """Run postinstalls when bundles have been updated.""" - for postinst in bundle.findall("PostInstall"): - if not self._action_allowed(postinst): - continue - self.cmd.run(postinst.get('name')) + states = dict() for action in bundle.findall("Action"): if action.get('timing') in ['post', 'both']: - if not self._action_allowed(action): + if not self._install_allowed(action): continue states[action] = self.RunAction(action) + return states - def BundleNotUpdated(self, bundle, states): + def BundleNotUpdated(self, bundle): """Run Actions when bundles have not been updated.""" + states = dict() for action in bundle.findall("Action"): - if action.get('timing') in ['post', 'both'] and \ - action.get('when') != 'modified': - if not self._action_allowed(action): + if (action.get('timing') in ['post', 'both'] and + action.get('when') != 'modified'): + if not self._install_allowed(action): continue states[action] = self.RunAction(action) + return states diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/APK.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/APK.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/APK.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/APK.py 2017-01-10 19:18:17.000000000 +0000 @@ -12,11 +12,6 @@ pkgtype = 'apk' pkgtool = ("/sbin/apk add %s", ("%s", ["name"])) - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) - self.installed = {} - self.RefreshPackages() - def RefreshPackages(self): """Refresh memory hashes of packages.""" names = self.cmd.run("/sbin/apk info").stdout.splitlines() @@ -38,8 +33,6 @@ if entry.attrib['name'] in self.installed: if entry.attrib['version'] in \ ['auto', self.installed[entry.attrib['name']]]: - # if (not self.setup['quick'] and - # entry.get('verify', 'true') == 'true'): # FIXME: Does APK have any sort of verification mechanism? 
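As the Action hunk above shows, 1.4 drivers no longer receive and mutate a shared states dict; each bundle hook builds its own mapping of entry to result and returns it. A minimal sketch of a custom driver following that convention (the NoopAction class is invented for illustration and assumes a 1.4 client install, it is not part of the patch):

import Bcfg2.Client.Tools


class NoopAction(Bcfg2.Client.Tools.Tool):
    """Example driver that reports every post-timing Action as successful."""
    name = 'NoopAction'
    __handles__ = [('Action', None)]
    __req__ = {'Action': ['name', 'timing', 'when', 'command', 'status']}

    def BundleUpdated(self, bundle):
        # build and return a fresh states dict instead of mutating one
        states = dict()
        for action in bundle.findall("Action"):
            if action.get('timing') in ['post', 'both']:
                states[action] = True  # pretend the command succeeded
        return states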
return True else: diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/APT.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/APT.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/APT.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/APT.py 2017-01-10 19:18:17.000000000 +0000 @@ -1,32 +1,40 @@ """This is the Bcfg2 support for apt-get.""" -# suppress apt API warnings -import warnings -warnings.filterwarnings("ignore", "apt API not stable yet", - FutureWarning) -import apt.cache import os +import sys + +import apt.cache + +import Bcfg2.Options import Bcfg2.Client.Tools + class APT(Bcfg2.Client.Tools.Tool): - """The Debian toolset implements package and service operations and inherits - the rest from Toolset.Toolset. + """The Debian toolset implements package and service operations + and inherits the rest from Tools.Tool.""" + + options = Bcfg2.Client.Tools.Tool.options + [ + Bcfg2.Options.PathOption( + cf=('APT', 'install_path'), + default='/usr', dest='apt_install_path', + help='Apt tools install path'), + Bcfg2.Options.PathOption( + cf=('APT', 'var_path'), default='/var', dest='apt_var_path', + help='Apt tools var path'), + Bcfg2.Options.PathOption( + cf=('APT', 'etc_path'), default='/etc', dest='apt_etc_path', + help='System etc path')] - """ - name = 'APT' __execs__ = [] __handles__ = [('Package', 'deb'), ('Path', 'ignore')] __req__ = {'Package': ['name', 'version'], 'Path': ['type']} - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + def __init__(self, config): + Bcfg2.Client.Tools.Tool.__init__(self, config) - self.install_path = setup.get('apt_install_path', '/usr') - self.var_path = setup.get('apt_var_path', '/var') - self.etc_path = setup.get('apt_etc_path', '/etc') - self.debsums = '%s/bin/debsums' % self.install_path - self.aptget = '%s/bin/apt-get' % self.install_path - self.dpkg = '%s/bin/dpkg' % self.install_path + self.debsums = '%s/bin/debsums' % Bcfg2.Options.setup.apt_install_path + self.aptget = '%s/bin/apt-get' % Bcfg2.Options.setup.apt_install_path + self.dpkg = '%s/bin/dpkg' % Bcfg2.Options.setup.apt_install_path self.__execs__ = [self.debsums, self.aptget, self.dpkg] path_entries = os.environ['PATH'].split(':') @@ -38,62 +46,65 @@ '-o DPkg::Options::=--force-confmiss ' + \ '--reinstall ' + \ '--force-yes ' - if not self.setup['debug']: + if not Bcfg2.Options.setup.debug: self.pkgcmd += '-q=2 ' self.pkgcmd += '-y install %s' - self.ignores = [entry.get('name') for struct in config \ - for entry in struct \ - if entry.tag == 'Path' and \ + self.ignores = [entry.get('name') for struct in config + for entry in struct + if entry.tag == 'Path' and entry.get('type') == 'ignore'] - self.__important__ = self.__important__ + \ - ["%s/cache/debconf/config.dat" % self.var_path, - "%s/cache/debconf/templates.dat" % self.var_path, - '/etc/passwd', '/etc/group', - '%s/apt/apt.conf' % self.etc_path, - '%s/dpkg/dpkg.cfg' % self.etc_path] + \ - [entry.get('name') for struct in config for entry in struct \ - if entry.tag == 'Path' and \ - entry.get('name').startswith('%s/apt/sources.list' % self.etc_path)] - self.nonexistent = [entry.get('name') for struct in config for entry in struct \ - if entry.tag == 'Path' and entry.get('type') == 'nonexistent'] + self.__important__ = self.__important__ + [ + "%s/cache/debconf/config.dat" % Bcfg2.Options.setup.apt_var_path, + "%s/cache/debconf/templates.dat" % + Bcfg2.Options.setup.apt_var_path, + '/etc/passwd', 
'/etc/group', + '%s/apt/apt.conf' % Bcfg2.Options.setup.apt_etc_path, + '%s/dpkg/dpkg.cfg' % Bcfg2.Options.setup.apt_etc_path] + \ + [entry.get('name') for struct in config + for entry in struct + if (entry.tag == 'Path' and + entry.get('name').startswith( + '%s/apt/sources.list' % + Bcfg2.Options.setup.apt_etc_path))] + self.nonexistent = [entry.get('name') for struct in config + for entry in struct + if (entry.tag == 'Path' and + entry.get('type') == 'nonexistent')] os.environ["DEBIAN_FRONTEND"] = 'noninteractive' self.actions = {} - if self.setup['kevlar'] and not self.setup['dryrun']: + if Bcfg2.Options.setup.kevlar and not Bcfg2.Options.setup.dry_run: self.cmd.run("%s --force-confold --configure --pending" % self.dpkg) self.cmd.run("%s clean" % self.aptget) try: self.pkg_cache = apt.cache.Cache() except SystemError: - e = sys.exc_info()[1] - self.logger.info("Failed to initialize APT cache: %s" % e) + err = sys.exc_info()[1] + self.logger.info("Failed to initialize APT cache: %s" % err) raise Bcfg2.Client.Tools.ToolInstantiationError - self.pkg_cache.update() + try: + self.pkg_cache.update() + except apt.cache.FetchFailedException: + err = sys.exc_info()[1] + self.logger.info("Failed to update APT cache: %s" % err) self.pkg_cache = apt.cache.Cache() - if 'req_reinstall_pkgs' in dir(self.pkg_cache): - self._newapi = True - else: - self._newapi = False def FindExtra(self): """Find extra packages.""" packages = [entry.get('name') for entry in self.getSupportedEntries()] - if self._newapi: - extras = [(p.name, p.installed.version) for p in self.pkg_cache - if p.is_installed and p.name not in packages] - else: - extras = [(p.name, p.installedVersion) for p in self.pkg_cache - if p.isInstalled and p.name not in packages] - return [Bcfg2.Client.XML.Element('Package', name=name, \ - type='deb', version=version) \ - for (name, version) in extras] + extras = [(p.name, p.installed.version) for p in self.pkg_cache + if p.is_installed and p.name not in packages] + return [Bcfg2.Client.XML.Element('Package', name=name, type='deb', + current_version=version) + for (name, version) in extras] def VerifyDebsums(self, entry, modlist): + """Verify the package contents with debsum information.""" output = \ self.cmd.run("%s -as %s" % (self.debsums, entry.get('name'))).stderr.splitlines() if len(output) == 1 and "no md5sums for" in output[0]: - self.logger.info("Package %s has no md5sums. Cannot verify" % \ + self.logger.info("Package %s has no md5sums. Cannot verify" % entry.get('name')) entry.set('qtext', "Reinstall Package %s-%s to setup md5sums? (y/N) " % @@ -113,10 +124,10 @@ # these files should not exist continue elif "is not installed" in item or "missing file" in item: - self.logger.error("Package %s is not fully installed" \ + self.logger.error("Package %s is not fully installed" % entry.get('name')) else: - self.logger.error("Got Unsupported pattern %s from debsums" \ + self.logger.error("Got Unsupported pattern %s from debsums" % item) files.append(item) files = list(set(files) - set(self.ignores)) @@ -127,65 +138,54 @@ modlist = [os.path.realpath(filename) for filename in modlist] bad = [filename for filename in files if filename not in modlist] if bad: - self.logger.debug("It is suggested that you either manage these " - "files, revert the changes, or ignore false " - "failures:") - self.logger.info("Package %s failed validation. 
Bad files are:" % \ - entry.get('name')) + self.logger.debug("It is suggested that you either manage " + "these files, revert the changes, or " + "ignore false failures:") + self.logger.info("Package %s failed validation. Bad files are:" + % entry.get('name')) self.logger.info(bad) - entry.set('qtext', - "Reinstall Package %s-%s to fix failing files? (y/N) " % \ - (entry.get('name'), entry.get('version'))) + entry.set( + 'qtext', + "Reinstall Package %s-%s to fix failing files? (y/N) " + % (entry.get('name'), entry.get('version'))) return False return True def VerifyPackage(self, entry, modlist, checksums=True): """Verify package for entry.""" - if not 'version' in entry.attrib: + if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % (entry.attrib['name'])) return False pkgname = entry.get('name') - if self.pkg_cache.has_key(pkgname): - if self._newapi: - is_installed = self.pkg_cache[pkgname].is_installed - else: - is_installed = self.pkg_cache[pkgname].isInstalled - if not self.pkg_cache.has_key(pkgname) or not is_installed: + if pkgname not in self.pkg_cache or \ + not self.pkg_cache[pkgname].is_installed: self.logger.info("Package %s not installed" % (entry.get('name'))) entry.set('current_exists', 'false') return False pkg = self.pkg_cache[pkgname] - if self._newapi: - installed_version = pkg.installed.version - candidate_version = pkg.candidate.version - else: - installed_version = pkg.installedVersion - candidate_version = pkg.candidateVersion + installed_version = pkg.installed.version if entry.get('version') == 'auto': - if self._newapi: - is_upgradable = self.pkg_cache._depcache.is_upgradable(pkg._pkg) - else: - is_upgradable = self.pkg_cache._depcache.IsUpgradable(pkg._pkg) - if is_upgradable: - desiredVersion = candidate_version + if pkg.is_upgradable: + desired_version = pkg.candidate.version else: - desiredVersion = installed_version + desired_version = installed_version elif entry.get('version') == 'any': - desiredVersion = installed_version + desired_version = installed_version else: - desiredVersion = entry.get('version') - if desiredVersion != installed_version: + desired_version = entry.get('version') + if desired_version != installed_version: entry.set('current_version', installed_version) - entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % \ + entry.set('qtext', "Modify Package %s (%s -> %s)? 
(y/N) " % (entry.get('name'), entry.get('current_version'), - desiredVersion)) + desired_version)) return False else: # version matches - if not self.setup['quick'] and entry.get('verify', 'true') == 'true' \ - and checksums: + if not Bcfg2.Options.setup.quick \ + and entry.get('verify', 'true') == 'true' \ + and checksums: pkgsums = self.VerifyDebsums(entry, modlist) return pkgsums return True @@ -198,75 +198,56 @@ self.logger.info('Removing packages:') self.logger.info(pkgnames) for pkg in pkgnames.split(" "): - try: - if self._newapi: - self.pkg_cache[pkg].mark_delete(purge=True) - else: - self.pkg_cache[pkg].markDelete(purge=True) - except: - if self._newapi: - self.pkg_cache[pkg].mark_delete() - else: - self.pkg_cache[pkg].markDelete() - try: - self.pkg_cache.commit() - except SystemExit: - # thank you python-apt 0.6 - pass + self.pkg_cache[pkg].mark_delete(purge=True) + self.pkg_cache.commit() self.pkg_cache = apt.cache.Cache() self.modified += packages self.extra = self.FindExtra() - def Install(self, packages, states): + def Install(self, packages): # it looks like you can't install arbitrary versions of software # out of the pkg cache, we will still need to call apt-get ipkgs = [] bad_pkgs = [] for pkg in packages: - if not self.pkg_cache.has_key(pkg.get('name')): - self.logger.error("APT has no information about package %s" % (pkg.get('name'))) + pkgname = pkg.get('name') + if pkgname not in self.pkg_cache: + self.logger.error("APT has no information about package %s" + % pkgname) continue if pkg.get('version') in ['auto', 'any']: - if self._newapi: - try: - ipkgs.append("%s=%s" % (pkg.get('name'), - self.pkg_cache[pkg.get('name')].candidate.version)) - except AttributeError: - self.logger.error("Failed to find %s in apt package cache" % - pkg.get('name')) - continue - else: - ipkgs.append("%s=%s" % (pkg.get('name'), - self.pkg_cache[pkg.get('name')].candidateVersion)) + try: + ipkgs.append("%s=%s" % ( + pkgname, + self.pkg_cache[pkgname].candidate.version)) + except AttributeError: + self.logger.error("Failed to find %s in apt package " + "cache" % pkgname) continue - if self._newapi: - avail_vers = [x.ver_str for x in \ - self.pkg_cache[pkg.get('name')]._pkg.version_list] - else: - avail_vers = [x.VerStr for x in \ - self.pkg_cache[pkg.get('name')]._pkg.VersionList] + avail_vers = self.pkg_cache[pkgname].versions.keys() if pkg.get('version') in avail_vers: - ipkgs.append("%s=%s" % (pkg.get('name'), pkg.get('version'))) + ipkgs.append("%s=%s" % (pkgname, pkg.get('version'))) continue else: - self.logger.error("Package %s: desired version %s not in %s" \ - % (pkg.get('name'), pkg.get('version'), - avail_vers)) - bad_pkgs.append(pkg.get('name')) + self.logger.error("Package %s: desired version %s not in %s" + % (pkgname, pkg.get('version'), avail_vers)) + bad_pkgs.append(pkgname) if bad_pkgs: self.logger.error("Cannot find correct versions of packages:") self.logger.error(bad_pkgs) if not ipkgs: - return + return dict() if not self.cmd.run(self.pkgcmd % (" ".join(ipkgs))): self.logger.error("APT command failed") self.pkg_cache = apt.cache.Cache() self.extra = self.FindExtra() + states = dict() for package in packages: states[package] = self.VerifyPackage(package, [], checksums=False) if states[package]: self.modified.append(package) + return states - def VerifyPath(self, entry, _): + def VerifyPath(self, entry, _): # pylint: disable=W0613 """Do nothing here since we only verify Path type=ignore.""" return True diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Blast.py 
bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Blast.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Blast.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Blast.py 2017-01-10 19:18:17.000000000 +0000 @@ -13,9 +13,9 @@ __handles__ = [('Package', 'blast')] __req__ = {'Package': ['name', 'version', 'bname']} - def __init__(self, logger, setup, config): + def __init__(self, config): # dont use the sysv constructor - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + Bcfg2.Client.Tools.PkgTool.__init__(self, config) noaskfile = tempfile.NamedTemporaryFile() self.noaskname = noaskfile.name try: diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/BundleDeps.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/BundleDeps.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/BundleDeps.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/BundleDeps.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,34 @@ +""" Bundle dependency support """ + +import Bcfg2.Client.Tools + + +class BundleDeps(Bcfg2.Client.Tools.Tool): + """Bundle dependency helper for Bcfg2. It handles Bundle tags inside the + bundles that references the required other bundles that should change the + modification status if the referenced bundles is modified.""" + + name = 'Bundle' + __handles__ = [('Bundle', None)] + __req__ = {'Bundle': ['name']} + + def InstallBundle(self, _): + """Simple no-op because we only need the BundleUpdated hook.""" + return dict() + + def VerifyBundle(self, entry, _): # pylint: disable=W0613 + """Simple no-op because we only need the BundleUpdated hook.""" + return True + + def BundleUpdated(self, entry): + """This handles the dependencies on this bundle. 
It searches all + Bundle tags in other bundles that references the current bundle name + and marks those tags as modified to trigger the modification hook on + the other bundles.""" + + bundle_name = entry.get('name') + for bundle in self.config.findall('./Bundle/Bundle'): + if bundle.get('name') == bundle_name and \ + bundle not in self.modified: + self.modified.append(bundle) + return dict() diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Chkconfig.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Chkconfig.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Chkconfig.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Chkconfig.py 2017-01-10 19:18:17.000000000 +0000 @@ -3,7 +3,6 @@ """This is chkconfig support.""" import os - import Bcfg2.Client.Tools import Bcfg2.Client.XML @@ -96,15 +95,15 @@ bootcmd = '/sbin/chkconfig %s %s' % (entry.get('name'), bootstatus) bootcmdrv = self.cmd.run(bootcmd).success - if self.setup['servicemode'] == 'disabled': + if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv - buildmode = self.setup['servicemode'] == 'build' - if (entry.get('status') == 'on' and not buildmode) and \ - entry.get('current_status') == 'off': + buildmode = Bcfg2.Options.setup.service_mode == 'build' + if ((entry.get('status') == 'on' and not buildmode) and + entry.get('current_status') == 'off'): svccmdrv = self.start_service(entry) - elif (entry.get('status') == 'off' or buildmode) and \ - entry.get('current_status') == 'on': + elif ((entry.get('status') == 'off' or buildmode) and + entry.get('current_status') == 'on'): svccmdrv = self.stop_service(entry) else: svccmdrv = True # ignore status attribute diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/DebInit.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/DebInit.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/DebInit.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/DebInit.py 2017-01-10 19:18:17.000000000 +0000 @@ -3,6 +3,7 @@ import glob import os import re +import Bcfg2.Options import Bcfg2.Client.Tools # Debian squeeze and beyond uses a dependecy based boot sequence @@ -33,8 +34,8 @@ if entry.get('sequence'): if (deb_version in DEBIAN_OLD_STYLE_BOOT_SEQUENCE or - deb_version.startswith('5') or - os.path.exists('/etc/init.d/.legacy-bootordering')): + deb_version.startswith('5') or + os.path.exists('/etc/init.d/.legacy-bootordering')): start_sequence = int(entry.get('sequence')) kill_sequence = 100 - start_sequence else: @@ -137,10 +138,10 @@ bootcmd = '/usr/sbin/update-rc.d -f %s remove' % \ entry.get('name') bootcmdrv = self.cmd.run(bootcmd) - if self.setup['servicemode'] == 'disabled': + if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv and seqcmdrv - buildmode = self.setup['servicemode'] == 'build' + buildmode = Bcfg2.Options.setup.service_mode == 'build' if (entry.get('status') == 'on' and not buildmode) and \ entry.get('current_status') == 'off': svccmdrv = self.start_service(entry) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/FreeBSDInit.py 2017-01-10 19:18:17.000000000 
+0000 @@ -1,27 +1,142 @@ """FreeBSD Init Support for Bcfg2.""" -__revision__ = '$Rev$' - -# TODO -# - hardcoded path to ports rc.d -# - doesn't know about /etc/rc.d/ import os +import re +import Bcfg2.Options import Bcfg2.Client.Tools class FreeBSDInit(Bcfg2.Client.Tools.SvcTool): """FreeBSD service support for Bcfg2.""" name = 'FreeBSDInit' + __execs__ = ['/usr/sbin/service', '/usr/sbin/sysrc'] __handles__ = [('Service', 'freebsd')] __req__ = {'Service': ['name', 'status']} + rcvar_re = re.compile(r'^(?P[a-z_]+_enable)="[A-Z]+"$') - def __init__(self, logger, cfg, setup): - Bcfg2.Client.Tools.Tool.__init__(self, logger, cfg, setup) - if os.uname()[0] != 'FreeBSD': - raise Bcfg2.Client.Tools.ToolInstantiationError + def get_svc_command(self, service, action): + return '/usr/sbin/service %s %s' % (service.get('name'), action) - def VerifyService(self, entry, _): + def verify_bootstatus(self, entry, bootstatus): + """Verify bootstatus for entry.""" + cmd = self.get_svc_command(entry, 'enabled') + current_bootstatus = bool(self.cmd.run(cmd)) + + if bootstatus == 'off': + if current_bootstatus: + entry.set('current_bootstatus', 'on') + return False + return True + elif not current_bootstatus: + entry.set('current_bootstatus', 'off') + return False return True - def get_svc_command(self, service, action): - return "/usr/local/etc/rc.d/%s %s" % (service.get('name'), action) + def check_service(self, entry): + # use 'onestatus' to enable status reporting for disabled services + cmd = self.get_svc_command(entry, 'onestatus') + return bool(self.cmd.run(cmd)) + + def stop_service(self, service): + # use 'onestop' to enable stopping of disabled services + self.logger.debug('Stopping service %s' % service.get('name')) + return self.cmd.run(self.get_svc_command(service, 'onestop')) + + def VerifyService(self, entry, _): + """Verify Service status for entry.""" + entry.set('target_status', entry.get('status')) # for reporting + bootstatus = self.get_bootstatus(entry) + if bootstatus is None: + return True + current_bootstatus = self.verify_bootstatus(entry, bootstatus) + + if entry.get('status') == 'ignore': + # 'ignore' should verify + current_svcstatus = True + svcstatus = True + else: + svcstatus = self.check_service(entry) + if entry.get('status') == 'on': + if svcstatus: + current_svcstatus = True + else: + current_svcstatus = False + elif entry.get('status') == 'off': + if svcstatus: + current_svcstatus = False + else: + current_svcstatus = True + + if svcstatus: + entry.set('current_status', 'on') + else: + entry.set('current_status', 'off') + + return current_bootstatus and current_svcstatus + + def InstallService(self, entry): + """Install Service entry.""" + self.logger.info("Installing Service %s" % (entry.get('name'))) + bootstatus = self.get_bootstatus(entry) + + # check if service exists + all_services_cmd = '/usr/sbin/service -l' + all_services = self.cmd.run(all_services_cmd).stdout.splitlines() + if entry.get('name') not in all_services: + self.logger.debug("Service %s does not exist" % entry.get('name')) + return False + + # get rcvar for service + vars = set() + rcvar_cmd = self.get_svc_command(entry, 'rcvar') + for line in self.cmd.run(rcvar_cmd).stdout.splitlines(): + match = self.rcvar_re.match(line) + if match: + vars.add(match.group('var')) + + if bootstatus is not None: + bootcmdrv = True + sysrcstatus = None + if bootstatus == 'on': + sysrcstatus = 'YES' + elif bootstatus == 'off': + sysrcstatus = 'NO' + if sysrcstatus is not None: + for var in vars: + if not 
self.cmd.run('/usr/sbin/sysrc %s="%s"' % (var, sysrcstatus)): + bootcmdrv = False + break + + if Bcfg2.Options.setup.service_mode == 'disabled': + # 'disabled' means we don't attempt to modify running svcs + return bootcmdrv + buildmode = Bcfg2.Options.setup.service_mode == 'build' + if (entry.get('status') == 'on' and not buildmode) and \ + entry.get('current_status') == 'off': + svccmdrv = self.start_service(entry) + elif (entry.get('status') == 'off' or buildmode) and \ + entry.get('current_status') == 'on': + svccmdrv = self.stop_service(entry) + else: + svccmdrv = True # ignore status attribute + return bootcmdrv and svccmdrv + else: + # when bootstatus is 'None', status == 'ignore' + return True + + def FindExtra(self): + """Find Extra FreeBSD Service entries.""" + specified = [entry.get('name') for entry in self.getSupportedEntries()] + extra = set() + for path in self.cmd.run("/usr/sbin/service -e").stdout.splitlines(): + name = os.path.basename(path) + if name not in specified: + extra.add(name) + return [Bcfg2.Client.XML.Element('Service', name=name, type='freebsd') + for name in list(extra)] + + def Remove(self, _): + """Remove extra service entries.""" + # Extra service removal is nonsensical + # Extra services need to be reflected in the config + return diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/HomeBrew.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/HomeBrew.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/HomeBrew.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/HomeBrew.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,54 @@ +"""This provides Bcfg2 support for homebrew packages.""" + +import Bcfg2.Client.Tools + + +class HomeBrew(Bcfg2.Client.Tools.PkgTool): + """Homebrew package support.""" + name = 'HomeBrew' + __execs__ = ["/usr/local/bin/brew"] + __handles__ = [('Package', 'homebrew')] + __req__ = {'Package': ['name', 'version']} + pkgtype = 'homebrew' + pkgtool = ('/usr/local/bin/brew install %s', ('%s', ['name'])) + + def RefreshPackages(self): + """Refresh memory hashes of packages.""" + pkgcache = self.cmd.run(["/usr/local/bin/brew", + "list", "--versions"]).stdout.splitlines() + self.installed = {} + for pkg in pkgcache: + pkgname, version = pkg.strip().split() + self.logger.debug(" pkgname: %s version: %s" % (pkgname, version)) + self.installed[pkgname] = version + + def VerifyPackage(self, entry, _): + """Verify Package status for entry.""" + self.logger.debug("VerifyPackage: %s : %s" % (entry.get('name'), + entry.get('version'))) + + if entry.attrib['name'] in self.installed: + if (self.installed[entry.attrib['name']] == + entry.attrib['version'] or + entry.attrib['version'] == 'any'): + return True + else: + self.logger.info(" %s: Wrong version installed. 
" + "Want %s, but have %s" % + (entry.get("name"), + entry.get("version"), + self.installed[entry.get("name")], + )) + + entry.set('current_version', self.installed[entry.get('name')]) + return False + entry.set('current_exists', 'false') + return False + + def Remove(self, packages): + """Remove extra packages.""" + pkg_names = [p.get('name') for p in packages] + self.logger.info("Removing packages: %s" % pkg_names) + self.cmd.run(["/usr/local/bin/brew", "uninstall"] + pkg_names) + self.RefreshPackages() + self.extra = self.FindExtra() diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/__init__.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/__init__.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/__init__.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/__init__.py 2017-01-10 19:18:17.000000000 +0000 @@ -3,21 +3,11 @@ import os import sys import stat +import logging +import Bcfg2.Options import Bcfg2.Client import Bcfg2.Client.XML from Bcfg2.Utils import Executor, ClassName -from Bcfg2.Compat import walk_packages # pylint: disable=W0622 - -__all__ = [m[1] for m in walk_packages(path=__path__)] - -# pylint: disable=C0103 -#: All available tools -drivers = [item for item in __all__ if item not in ['rpmtools']] - -#: The default set of tools that will be used if "drivers" is not set -#: in bcfg2.conf -default = drivers[:] -# pylint: enable=C0103 class ToolInstantiationError(Exception): @@ -35,6 +25,12 @@ .. autoattribute:: Bcfg2.Client.Tools.Tool.__important__ """ + options = [ + Bcfg2.Options.Option( + cf=('client', 'command_timeout'), + help="Timeout when running external commands other than probes", + type=Bcfg2.Options.Types.timeout)] + #: The name of the tool. By default this uses #: :class:`Bcfg2.Client.Tools.ClassName` to ensure that it is the #: same as the name of the class. @@ -78,30 +74,22 @@ #: runtime with a warning. conflicts = [] - def __init__(self, logger, setup, config): + def __init__(self, config): """ - :param logger: Logger that will be used for logging by this tool - :type logger: logging.Logger - :param setup: The option set Bcfg2 was invoked with - :type setup: Bcfg2.Options.OptionParser :param config: The XML configuration for this client :type config: lxml.etree._Element :raises: :exc:`Bcfg2.Client.Tools.ToolInstantiationError` """ - #: A :class:`Bcfg2.Options.OptionParser` object describing the - #: option set Bcfg2 was invoked with - self.setup = setup - #: A :class:`logging.Logger` object that will be used by this #: tool for logging - self.logger = logger + self.logger = logging.getLogger(self.name) #: The XML configuration for this client self.config = config #: An :class:`Bcfg2.Utils.Executor` object for #: running external commands. 
- self.cmd = Executor(timeout=self.setup['command_timeout']) + self.cmd = Executor(timeout=Bcfg2.Options.setup.command_timeout) #: A list of entries that have been modified by this tool self.modified = [] @@ -122,7 +110,7 @@ for struct in self.config: for entry in struct: if (entry.tag == 'Path' and - entry.get('important', 'false').lower() == 'true'): + entry.get('important', 'false').lower() == 'true'): self.__important__.append(entry.get('name')) self.handled = self.getSupportedEntries() @@ -141,27 +129,44 @@ raise ToolInstantiationError("%s: %s not executable" % (self.name, filename)) - def BundleUpdated(self, bundle, states): # pylint: disable=W0613 + def _install_allowed(self, entry): + """ Return true if the given entry is allowed to be installed by + the whitelist or blacklist """ + if (Bcfg2.Options.setup.decision == 'whitelist' and + not Bcfg2.Client.matches_white_list( + entry, Bcfg2.Options.setup.decision_list)): + self.logger.info("In whitelist mode: suppressing Action: %s" % + entry.get('name')) + return False + if (Bcfg2.Options.setup.decision == 'blacklist' and + not Bcfg2.Client.passes_black_list( + entry, Bcfg2.Options.setup.decision_list)): + self.logger.info("In blacklist mode: suppressing Action: %s" % + entry.get('name')) + return False + return True + + def BundleUpdated(self, bundle): # pylint: disable=W0613 """ Callback that is invoked when a bundle has been updated. :param bundle: The bundle that has been updated :type bundle: lxml.etree._Element - :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict - :type states: dict - :returns: None """ - return + :returns: dict - A dict of the state of entries suitable for + updating :attr:`Bcfg2.Client.Client.states` + """ + return dict() - def BundleNotUpdated(self, bundle, states): # pylint: disable=W0613 + def BundleNotUpdated(self, bundle): # pylint: disable=W0613 """ Callback that is invoked when a bundle has been updated. :param bundle: The bundle that has been updated :type bundle: lxml.etree._Element - :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict - :type states: dict - :returns: None """ - return + :returns: dict - A dict of the state of entries suitable for + updating :attr:`Bcfg2.Client.Client.states` + """ + return dict() - def Inventory(self, states, structures=None): + def Inventory(self, structures=None): """ Take an inventory of the system as it exists. This involves two steps: @@ -176,18 +181,19 @@ is the entry tag. E.g., a Path entry would be verified by calling :func:`VerifyPath`. - :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict - :type states: dict :param structures: The list of structures (i.e., bundles) to get entries from. If this is not given, all children of :attr:`Bcfg2.Client.Tools.Tool.config` will be used. :type structures: list of lxml.etree._Element - :returns: None """ + :returns: dict - A dict of the state of entries suitable for + updating :attr:`Bcfg2.Client.Client.states` + """ if not structures: structures = self.config.getchildren() mods = self.buildModlist() + states = dict() for struct in structures: for entry in struct.getchildren(): if self.canVerify(entry): @@ -199,14 +205,17 @@ continue try: states[entry] = func(entry, mods) + except KeyboardInterrupt: + raise except: # pylint: disable=W0702 self.logger.error("%s: Unexpected failure verifying %s" % (self.name, self.primarykey(entry)), exc_info=1) self.extra = self.FindExtra() + return states - def Install(self, entries, states): + def Install(self, entries): """ Install entries. 
'Install' in this sense means either initially install, or update as necessary to match the specification. @@ -218,9 +227,10 @@ :param entries: The entries to install :type entries: list of lxml.etree._Element - :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict - :type states: dict - :returns: None """ + :returns: dict - A dict of the state of entries suitable for + updating :attr:`Bcfg2.Client.Client.states` + """ + states = dict() for entry in entries: try: func = getattr(self, "Install%s" % entry.tag) @@ -236,6 +246,7 @@ self.logger.error("%s: Unexpected failure installing %s" % (self.name, self.primarykey(entry)), exc_info=1) + return states def Remove(self, entries): """ Remove specified extra entries. @@ -396,8 +407,8 @@ #: The ``type`` attribute of Packages handled by this tool. pkgtype = 'echo' - def __init__(self, logger, setup, config): - Tool.__init__(self, logger, setup, config) + def __init__(self, config): + Tool.__init__(self, config) #: A dict of installed packages; the keys should be package #: names and the values should be simple strings giving the @@ -434,32 +445,27 @@ for pkg in packages) return self.pkgtool[0] % pkgargs - def Install(self, packages, states): + def Install(self, packages): """ Run a one-pass install where all required packages are installed with a single command, followed by single package installs in case of failure. :param entries: The entries to install :type entries: list of lxml.etree._Element - :param states: The :attr:`Bcfg2.Client.Frame.Frame.states` dict - :type states: dict - :returns: None """ + :returns: dict - A dict of the state of entries suitable for + updating :attr:`Bcfg2.Client.Client.states` + """ self.logger.info("Trying single pass package install for pkgtype %s" % self.pkgtype) - pkgcmd = self._get_package_command(packages) - self.logger.debug("Running command: %s" % pkgcmd) - if self.cmd.run(pkgcmd): + states = dict() + if self.cmd.run(self._get_package_command(packages)): self.logger.info("Single Pass Succeded") # set all package states to true and flush workqueues - pkgnames = [pkg.get('name') for pkg in packages] - for entry in list(states.keys()): - if (entry.tag == 'Package' - and entry.get('type') == self.pkgtype - and entry.get('name') in pkgnames): - self.logger.debug('Setting state to true for pkg %s' % - entry.get('name')) - states[entry] = True + for entry in packages: + self.logger.debug('Setting state to true for %s' % + self.primarykey(entry)) + states[entry] = True self.RefreshPackages() else: self.logger.error("Single Pass Failed") @@ -477,10 +483,13 @@ if self.cmd.run(self._get_package_command([pkg])): states[pkg] = True else: + states[pkg] = False self.logger.error("Failed to install package %s" % pkg.get('name')) self.RefreshPackages() - self.modified.extend(entry for entry in packages if states[entry]) + self.modified.extend(entry for entry in packages + if entry in states and states[entry]) + return states def RefreshPackages(self): """ Refresh the internal representation of the package @@ -494,7 +503,8 @@ extras = [data for data in list(self.installed.items()) if data[0] not in packages] return [Bcfg2.Client.XML.Element('Package', name=name, - type=self.pkgtype, version=version) + type=self.pkgtype, + current_version=version) for (name, version) in extras] FindExtra.__doc__ = Tool.FindExtra.__doc__ @@ -502,8 +512,14 @@ class SvcTool(Tool): """ Base class for tools that handle Service entries """ - def __init__(self, logger, setup, config): - Tool.__init__(self, logger, setup, config) + options 
= Tool.options + [ + Bcfg2.Options.Option( + '-s', '--service-mode', default='default', + choices=['default', 'disabled', 'build'], + help='Set client service mode')] + + def __init__(self, config): + Tool.__init__(self, config) #: List of services that have been restarted self.restarted = [] __init__.__doc__ = Tool.__init__.__doc__ @@ -558,7 +574,7 @@ return self.cmd.run(self.get_svc_command(service, 'stop')) def restart_service(self, service): - """ Restart a service. + """Restart a service. :param service: The service entry to modify :type service: lxml.etree._Element @@ -580,32 +596,34 @@ return bool(self.cmd.run(self.get_svc_command(service, 'status'))) def Remove(self, services): - if self.setup['servicemode'] != 'disabled': + if Bcfg2.Options.setup.service_mode != 'disabled': for entry in services: entry.set("status", "off") self.InstallService(entry) Remove.__doc__ = Tool.Remove.__doc__ - def BundleUpdated(self, bundle, states): - if self.setup['servicemode'] == 'disabled': + def BundleUpdated(self, bundle): + if Bcfg2.Options.setup.service_mode == 'disabled': return for entry in bundle: - if not self.handlesEntry(entry): + if (not self.handlesEntry(entry) or + not self._install_allowed(entry)): continue estatus = entry.get('status') restart = entry.get("restart", "true").lower() if (restart == "false" or estatus == 'ignore' or - (restart == "interactive" and not self.setup['interactive'])): + (restart == "interactive" and + not Bcfg2.Options.setup.interactive)): continue success = False if estatus == 'on': - if self.setup['servicemode'] == 'build': + if Bcfg2.Options.setup.service_mode == 'build': success = self.stop_service(entry) elif entry.get('name') not in self.restarted: - if self.setup['interactive']: + if Bcfg2.Options.setup.interactive: if not Bcfg2.Client.prompt('Restart service %s? 
(y/N) ' % entry.get('name')): continue @@ -617,9 +635,10 @@ if not success: self.logger.error("Failed to manipulate service %s" % (entry.get('name'))) + return dict() BundleUpdated.__doc__ = Tool.BundleUpdated.__doc__ - def Install(self, entries, states): + def Install(self, entries): install_entries = [] for entry in entries: if entry.get('install', 'true').lower() == 'false': @@ -627,7 +646,7 @@ (entry.tag, entry.get('name'))) else: install_entries.append(entry) - return Tool.Install(self, install_entries, states) + return Tool.Install(self, install_entries) Install.__doc__ = Tool.Install.__doc__ def InstallService(self, entry): diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/IPS.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/IPS.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/IPS.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/IPS.py 2017-01-10 19:18:17.000000000 +0000 @@ -15,14 +15,13 @@ __req__ = {'Package': ['name', 'version']} pkgtool = ('pkg install --no-refresh %s', ('%s', ['name'])) - def __init__(self, logger, setup, cfg): + def __init__(self, config): self.installed = {} self.pending_upgrades = set() self.image = image.Image() self.image.find_root('/', False) self.image.load_config() - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, cfg) - self.cfg = cfg + Bcfg2.Client.Tools.PkgTool.__init__(self, config) def RefreshPackages(self): self.installed = dict() diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/launchd.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/launchd.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/launchd.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/launchd.py 2017-01-10 19:18:17.000000000 +0000 @@ -12,8 +12,8 @@ __execs__ = ['/bin/launchctl', '/usr/bin/defaults'] __req__ = {'Service': ['name', 'status']} - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + def __init__(self, config): + Bcfg2.Client.Tools.Tool.__init__(self, config) # Locate plist file that provides given reverse-fqdn name: # @@ -117,9 +117,11 @@ status='on') for name in allsrv] - def BundleUpdated(self, bundle, states): + def BundleUpdated(self, bundle): """Reload launchd plist.""" - for entry in [entry for entry in bundle if self.handlesEntry(entry)]: + for entry in bundle: + if not self.handlesEntry(entry): + continue if not self.canInstall(entry): self.logger.error("Insufficient information to restart " "service %s" % entry.get('name')) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/MacPorts.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/MacPorts.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/MacPorts.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/MacPorts.py 2017-01-10 19:18:17.000000000 +0000 @@ -12,11 +12,6 @@ pkgtype = 'macport' pkgtool = ('/opt/local/bin/port install %s', ('%s', ['name'])) - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) - self.installed = {} - self.RefreshPackages() - def RefreshPackages(self): """Refresh memory hashes of packages.""" pkgcache = self.cmd.run(["/opt/local/bin/port", @@ -42,10 +37,9 @@ return False if entry.attrib['name'] in self.installed: - if (self.installed[entry.attrib['name']] == entry.attrib['version'] - or entry.attrib['version'] == 'any'): - # if (not self.setup['quick'] and - # 
entry.get('verify', 'true') == 'true'): + if (entry.attrib['version'] == 'any' or + self.installed[entry.attrib['name']] == + entry.attrib['version']): # FIXME: We should be able to check this once # http://trac.macports.org/ticket/15709 is implemented return True diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/OpenCSW.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/OpenCSW.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/OpenCSW.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/OpenCSW.py 2017-01-10 19:18:17.000000000 +0000 @@ -14,9 +14,9 @@ __handles__ = [('Package', 'opencsw')] __req__ = {'Package': ['name', 'version', 'bname']} - def __init__(self, logger, setup, config): + def __init__(self, config): # dont use the sysv constructor - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + Bcfg2.Client.Tools.PkgTool.__init__(self, config) noaskfile = tempfile.NamedTemporaryFile() self.noaskname = noaskfile.name try: diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Pacman.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Pacman.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Pacman.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Pacman.py 2017-01-10 19:18:17.000000000 +0000 @@ -5,7 +5,7 @@ class Pacman(Bcfg2.Client.Tools.PkgTool): - '''Archlinux package support''' + '''Arch Linux package support''' name = 'Pacman' __execs__ = ["/usr/bin/pacman"] __handles__ = [('Package', 'pacman')] @@ -13,11 +13,6 @@ pkgtype = 'pacman' pkgtool = ("/usr/bin/pacman --needed --noconfirm --noprogressbar") - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) - self.installed = {} - self.RefreshPackages() - def RefreshPackages(self): '''Refresh memory hashes of packages''' self.installed = {} @@ -29,8 +24,8 @@ def VerifyPackage(self, entry, _): '''Verify Package status for entry''' - self.logger.info("VerifyPackage: %s : %s" % (entry.get('name'), - entry.get('version'))) + self.logger.debug("VerifyPackage: %s : %s" % (entry.get('name'), + entry.get('version'))) if 'version' not in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % @@ -42,18 +37,15 @@ return True elif self.installed[entry.attrib['name']] == \ entry.attrib['version']: - # if (not self.setup['quick'] and - # entry.get('verify', 'true') == 'true'): # FIXME: need to figure out if pacman # allows you to verify packages return True else: entry.set('current_version', self.installed[entry.get('name')]) - self.logger.info("attribname: %s" % (entry.attrib['name'])) - self.logger.info("attribname: %s" % (entry.attrib['name'])) + self.logger.debug("attribname: %s" % (entry.attrib['name'])) return False entry.set('current_exists', 'false') - self.logger.info("attribname: %s" % (entry.attrib['name'])) + self.logger.debug("attribname: %s" % (entry.attrib['name'])) return False def Remove(self, packages): @@ -65,7 +57,7 @@ self.RefreshPackages() self.extra = self.FindExtra() - def Install(self, packages, states): + def Install(self, packages): ''' Pacman Install ''' diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Pkgng.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Pkgng.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Pkgng.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Pkgng.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,227 @@ +"""This 
is the Bcfg2 support for pkg.""" + +import os +import Bcfg2.Options +import Bcfg2.Client.Tools + + +class Pkgng(Bcfg2.Client.Tools.Tool): + """Support for pkgng packages on FreeBSD.""" + + options = Bcfg2.Client.Tools.Tool.options + [ + Bcfg2.Options.PathOption( + cf=('Pkgng', 'path'), + default='/usr/sbin/pkg', dest='pkg_path', + help='Pkgng tool path')] + + name = 'Pkgng' + __execs__ = [] + __handles__ = [('Package', 'pkgng'), ('Path', 'ignore')] + __req__ = {'Package': ['name', 'version'], 'Path': ['type']} + + def __init__(self, config): + Bcfg2.Client.Tools.Tool.__init__(self, config) + + self.pkg = Bcfg2.Options.setup.pkg_path + self.__execs__ = [self.pkg] + + self.pkgcmd = self.pkg + ' install -fy' + if not Bcfg2.Options.setup.debug: + self.pkgcmd += ' -q' + self.pkgcmd += ' %s' + + self.ignores = [entry.get('name') for struct in config + for entry in struct + if entry.tag == 'Path' and + entry.get('type') == 'ignore'] + + self.__important__ = self.__important__ + \ + [entry.get('name') for struct in config + for entry in struct + if (entry.tag == 'Path' and + entry.get('name').startswith('/etc/pkg/'))] + self.nonexistent = [entry.get('name') for struct in config + for entry in struct + if entry.tag == 'Path' and + entry.get('type') == 'nonexistent'] + self.actions = {} + self.pkg_cache = {} + + try: + self._load_pkg_cache() + except OSError: + raise Bcfg2.Client.Tools.ToolInstantiationError + + def _load_pkg_cache(self): + """Cache the version of all currently installed packages.""" + self.pkg_cache = {} + output = self.cmd.run([self.pkg, 'query', '-a', '%n %v']).stdout + for line in output.splitlines(): + parts = line.split(' ') + name = ' '.join(parts[:-1]) + self.pkg_cache[name] = parts[-1] + + def FindExtra(self): + """Find extra packages.""" + packages = [entry.get('name') for entry in self.getSupportedEntries()] + extras = [(name, value) for (name, value) in self.pkg_cache.items() + if name not in packages] + return [Bcfg2.Client.XML.Element('Package', name=name, type='pkgng', + current_version=version) + for (name, version) in extras] + + def VerifyChecksums(self, entry, modlist): + """Verify the checksum of the files, owned by a package.""" + output = self.cmd.run([self.pkg, 'check', '-s', + entry.get('name')]).stdout.splitlines() + files = [] + for item in output: + if "checksum mismatch" in item: + files.append(item.split()[-1]) + elif "No such file or directory" in item: + continue + else: + self.logger.error("Got Unsupported pattern %s " + "from pkg check" % item) + + files = list(set(files) - set(self.ignores)) + # We check if there is file in the checksum to do + if files: + # if files are found there we try to be sure our modlist is sane + # with erroneous symlinks + modlist = [os.path.realpath(filename) for filename in modlist] + bad = [filename for filename in files if filename not in modlist] + if bad: + self.logger.debug("It is suggested that you either manage " + "these files, revert the changes, or ignore " + "false failures:") + self.logger.info("Package %s failed validation. Bad files " + "are:" % entry.get('name')) + self.logger.info(bad) + entry.set('qtext', + "Reinstall Package %s-%s to fix failing files? " + "(y/N) " % (entry.get('name'), entry.get('version'))) + return False + return True + + def _get_candidate_versions(self, name): + """ + Get versions of the specified package name available for + installation from the configured remote repositories. 
+ """ + output = self.cmd.run([self.pkg, 'search', '-U', '-Qversion', '-q', + '-Sname', '-e', name]).stdout.splitlines() + versions = [] + for line in output: + versions.append(line) + + if len(versions) == 0: + return None + + return sorted(versions) + + def VerifyPackage(self, entry, modlist, checksums=True): + """Verify package for entry.""" + if 'version' not in entry.attrib: + self.logger.info("Cannot verify unversioned package %s" % + (entry.attrib['name'])) + return False + + pkgname = entry.get('name') + if pkgname not in self.pkg_cache: + self.logger.info("Package %s not installed" % (entry.get('name'))) + entry.set('current_exists', 'false') + return False + + installed_version = self.pkg_cache[pkgname] + candidate_versions = self._get_candidate_versions(pkgname) + if candidate_versions is not None: + candidate_version = candidate_versions[0] + else: + self.logger.error("Package %s is installed but no candidate" + "version was found." % (entry.get('name'))) + return False + + if entry.get('version').startswith('auto'): + desired_version = candidate_version + entry.set('version', "auto: %s" % desired_version) + elif entry.get('version').startswith('any'): + desired_version = installed_version + entry.set('version', "any: %s" % desired_version) + else: + desired_version = entry.get('version') + + if desired_version != installed_version: + entry.set('current_version', installed_version) + entry.set('qtext', "Modify Package %s (%s -> %s)? (y/N) " % + (entry.get('name'), entry.get('current_version'), + desired_version)) + return False + else: + # version matches + if (not Bcfg2.Options.setup.quick and + entry.get('verify', 'true') == 'true' and + checksums): + pkgsums = self.VerifyChecksums(entry, modlist) + return pkgsums + return True + + def Remove(self, packages): + """Deal with extra configuration detected.""" + pkgnames = " ".join([pkg.get('name') for pkg in packages]) + if len(packages) > 0: + self.logger.info('Removing packages:') + self.logger.info(pkgnames) + self.cmd.run([self.pkg, 'delete', '-y', pkgnames]) + self._load_pkg_cache() + self.modified += packages + self.extra = self.FindExtra() + + def Install(self, packages): + ipkgs = [] + bad_pkgs = [] + for pkg in packages: + versions = self._get_candidate_versions(pkg.get('name')) + if versions is None: + self.logger.error("pkg has no information about package %s" % + (pkg.get('name'))) + continue + + if pkg.get('version').startswith('auto') or \ + pkg.get('version').startswith('any'): + ipkgs.append("%s-%s" % (pkg.get('name'), versions[0])) + continue + + if pkg.get('version') in versions: + ipkgs.append("%s-%s" % (pkg.get('name'), pkg.get('version'))) + continue + else: + self.logger.error("Package %s: desired version %s not in %s" % + (pkg.get('name'), pkg.get('version'), + versions)) + bad_pkgs.append(pkg.get('name')) + + if bad_pkgs: + self.logger.error("Cannot find correct versions of packages:") + self.logger.error(bad_pkgs) + if not ipkgs: + return dict() + if not self.cmd.run(self.pkgcmd % (" ".join(ipkgs))): + self.logger.error("pkg command failed") + self._load_pkg_cache() + self.extra = self.FindExtra() + mark = [] + states = dict() + for package in packages: + states[package] = self.VerifyPackage(package, [], checksums=False) + if states[package]: + self.modified.append(package) + if package.get('origin') == 'Packages': + mark.append(package.get('name')) + if mark: + self.cmd.run([self.pkg, 'set', '-A1', '-y'] + mark) + return states + + def VerifyPath(self, _entry, _): + """Do nothing here since we only 
verify Path type=ignore.""" + return True diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Portage.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Portage.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Portage.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Portage.py 2017-01-10 19:18:17.000000000 +0000 @@ -5,9 +5,13 @@ class Portage(Bcfg2.Client.Tools.PkgTool): - """The Gentoo toolset implements package and service operations and - inherits the rest from Toolset.Toolset.""" - name = 'Portage' + """The Gentoo toolset implements package and service operations + and inherits the rest from Tools.Tool.""" + + options = Bcfg2.Client.Tools.PkgTool.options + [ + Bcfg2.Options.BooleanOption( + cf=('Portage', 'binpkgonly'), help='Portage binary packages only')] + __execs__ = ['/usr/bin/emerge', '/usr/bin/equery'] __handles__ = [('Package', 'ebuild')] __req__ = {'Package': ['name', 'version']} @@ -17,17 +21,15 @@ 'version'])) pkgtool = ('emerge %s', ('=%s-%s', ['name', 'version'])) - def __init__(self, logger, cfg, setup): + def __init__(self, config): self._initialised = False - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, cfg, setup) + Bcfg2.Client.Tools.PkgTool.__init__(self, config) self._initialised = True self.__important__ = self.__important__ + ['/etc/make.conf'] self._pkg_pattern = re.compile(r'(.*)-(\d.*)') self._ebuild_pattern = re.compile('(ebuild|binary)') - self.cfg = cfg self.installed = {} - self._binpkgonly = self.setup.get('portage_binpkgonly', False) - if self._binpkgonly: + if Bcfg2.Options.setup.binpkgonly: self.pkgtool = self._binpkgtool self.RefreshPackages() @@ -62,9 +64,9 @@ version = self.installed[entry.get('name')] entry.set('current_version', version) - if not self.setup['quick']: + if not Bcfg2.Options.setup.quick: if ('verify' not in entry.attrib or - entry.get('verify').lower() == 'true'): + entry.get('verify').lower() == 'true'): # Check the package if: # - Not running in quick mode diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/Augeas.py 2017-01-10 19:18:17.000000000 +0000 @@ -10,10 +10,10 @@ class AugeasCommand(object): """ Base class for all Augeas command objects """ - def __init__(self, command, augeas_obj, logger): + def __init__(self, entry, command, augeas_obj, logger): self._augeas = augeas_obj self.command = command - self.entry = self.command.getparent() + self.entry = entry self.logger = logger def get_path(self, attr="path"): @@ -115,8 +115,8 @@ class Move(AugeasCommand): """ Augeas ``move`` command """ - def __init__(self, command, augeas_obj, logger): - AugeasCommand.__init__(self, command, augeas_obj, logger) + def __init__(self, entry, command, augeas_obj, logger): + AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.source = self.get_path("source") self.dest = self.get_path("destination") @@ -131,8 +131,8 @@ class Set(AugeasCommand): """ Augeas ``set`` command """ - def __init__(self, command, augeas_obj, logger): - AugeasCommand.__init__(self, command, augeas_obj, logger) + def __init__(self, entry, command, augeas_obj, logger): + AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.value = self.command.get("value") def verify(self): @@ -146,15 +146,15 @@ class 
Clear(Set): """ Augeas ``clear`` command """ - def __init__(self, command, augeas_obj, logger): - Set.__init__(self, command, augeas_obj, logger) + def __init__(self, entry, command, augeas_obj, logger): + Set.__init__(self, entry, command, augeas_obj, logger) self.value = None class SetMulti(AugeasCommand): """ Augeas ``setm`` command """ - def __init__(self, command, augeas_obj, logger): - AugeasCommand.__init__(self, command, augeas_obj, logger) + def __init__(self, entry, command, augeas_obj, logger): + AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.sub = self.command.get("sub") self.value = self.command.get("value") self.base = self.get_path("base") @@ -170,8 +170,8 @@ class Insert(AugeasCommand): """ Augeas ``ins`` command """ - def __init__(self, command, augeas_obj, logger): - AugeasCommand.__init__(self, command, augeas_obj, logger) + def __init__(self, entry, command, augeas_obj, logger): + AugeasCommand.__init__(self, entry, command, augeas_obj, logger) self.label = self.command.get("label") self.where = self.command.get("where", "before") self.before = self.where == "before" @@ -190,12 +190,12 @@ :ref:`client-tools-augeas`. """ __req__ = ['name', 'mode', 'owner', 'group'] - def __init__(self, logger, setup, config): - POSIXTool.__init__(self, logger, setup, config) + def __init__(self, config): + POSIXTool.__init__(self, config) self._augeas = dict() # file tool for setting initial values of files that don't # exist - self.filetool = POSIXFile(logger, setup, config) + self.filetool = POSIXFile(config) def get_augeas(self, entry): """ Get an augeas object for the given entry. """ @@ -230,11 +230,12 @@ objects representing the commands. """ rv = [] - for cmd in entry.iterchildren(): + for cmd in entry: if cmd.tag == "Initial": continue if cmd.tag in globals(): - rv.append(globals()[cmd.tag](cmd, self.get_augeas(entry), + rv.append(globals()[cmd.tag](entry, cmd, + self.get_augeas(entry), self.logger)) else: err = "Augeas: Unknown command %s in %s" % (cmd.tag, diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/base.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/base.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/base.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/base.py 2017-01-10 19:18:17.000000000 +0000 @@ -6,9 +6,11 @@ import grp import stat import copy +import errno import shutil import Bcfg2.Client.Tools import Bcfg2.Client.XML +import Bcfg2.Options from Bcfg2.Compat import oct_mode try: @@ -37,6 +39,22 @@ class POSIXTool(Bcfg2.Client.Tools.Tool): """ Base class for tools that handle POSIX (Path) entries """ + + options = [ + Bcfg2.Options.Option( + cf=('POSIX', 'secontext_ignore'), + default=['anon_inodefs_t', 'bdev_t', 'binfmt_misc_fs_t', + 'capifs_t', 'configfs_t', 'cpusetfs_t', 'ecryptfs_t', + 'eventpollfs_t', 'futexfs_t', 'hugetlbfs_t', 'ibmasmfs_t', + 'inotifyfs_t', 'mvfs_t', 'nfsd_fs_t', 'oprofilefs_t', + 'ramfs_t', 'romfs_t', 'rpc_pipefs_t', 'spufs_t', + 'squash_t', 'vmblock_t', 'vxfs_t', 'xenfs_t', 'autofs_t', + 'cifs_t', 'dosfs_t', 'fusefs_t', 'iso9660_t', + 'removable_t', 'nfs_t'], + help='secontext types to ignore labeling errors', + type=Bcfg2.Options.Types.colon_list) + ] + def fully_specified(self, entry): # pylint: disable=W0613 """ return True if the entry is fully specified """ # checking is done by __req__ @@ -105,23 +123,23 @@ path = entry.get("name") rv = True - if entry.get("owner") and entry.get("group"): - try: - 
self.logger.debug("POSIX: Setting ownership of %s to %s:%s" % - (path, - self._norm_entry_uid(entry), - self._norm_entry_gid(entry))) - os.chown(path, self._norm_entry_uid(entry), - self._norm_entry_gid(entry)) - except KeyError: - self.logger.error('POSIX: Failed to change ownership of %s' % - path) - rv = False - os.chown(path, 0, 0) - except OSError: - self.logger.error('POSIX: Failed to change ownership of %s' % - path) - rv = False + if os.geteuid() == 0: + if entry.get("owner") and entry.get("group"): + try: + self.logger.debug("POSIX: Setting ownership of %s to %s:%s" + % (path, + self._norm_entry_uid(entry), + self._norm_entry_gid(entry))) + os.chown(path, self._norm_entry_uid(entry), + self._norm_entry_gid(entry)) + except (OSError, KeyError): + self.logger.error('POSIX: Failed to change ownership of %s' + % path) + rv = False + if sys.exc_info()[0] == KeyError: + os.chown(path, 0, 0) + else: + self.logger.debug("POSIX: Run as non-root, not setting ownership") if entry.get("mode"): wanted_mode = int(entry.get('mode'), 8) @@ -272,7 +290,7 @@ rv &= self._apply_acl(defacl, path, posix1e.ACL_TYPE_DEFAULT) return rv - def _set_secontext(self, entry, path=None): + def _set_secontext(self, entry, path=None): # pylint: disable=R0911 """ set the SELinux context of the file on disk according to the config""" if not HAS_SELINUX: @@ -284,25 +302,28 @@ if not context: # no context listed return True - - if context == '__default__': - try: + secontext = selinux.lgetfilecon(path)[1].split(":")[2] + if secontext in Bcfg2.Options.setup.secontext_ignore: + return True + try: + if context == '__default__': selinux.restorecon(path) - rv = True - except OSError: - err = sys.exc_info()[1] - self.logger.error("POSIX: Failed to restore SELinux context " - "for %s: %s" % (path, err)) - rv = False - else: - try: - rv = selinux.lsetfilecon(path, context) == 0 - except OSError: - err = sys.exc_info()[1] - self.logger.error("POSIX: Failed to restore SELinux context " - "for %s: %s" % (path, err)) - rv = False - return rv + return True + else: + return selinux.lsetfilecon(path, context) == 0 + except OSError: + err = sys.exc_info()[1] + if err.errno == errno.EOPNOTSUPP: + # Operation not supported + if context != '__default__': + self.logger.debug("POSIX: Failed to set SELinux context " + "for %s: %s" % (path, err)) + return False + return True + err = sys.exc_info()[1] + self.logger.error("POSIX: Failed to set or restore SELinux " + "context for %s: %s" % (path, err)) + return False def _norm_gid(self, gid): """ This takes a group name or gid and returns the @@ -518,7 +539,8 @@ (path, attrib['current_group'], entry.get('group'))) if (wanted_mode and - oct_mode(int(attrib['current_mode'], 8)) != oct_mode(wanted_mode)): + oct_mode(int(attrib['current_mode'], 8)) != + oct_mode(wanted_mode)): errors.append("Permissions for path %s are incorrect. " "Current permissions are %s but should be %s" % (path, attrib['current_mode'], entry.get('mode'))) @@ -540,10 +562,10 @@ except OSError: errors.append("%s has no default SELinux context" % entry.get("name")) - else: - wanted_secontext = entry.get("secontext") + elif entry.get("secontext"): + wanted_secontext = entry.get("secontext").split(":")[2] if (wanted_secontext and - attrib['current_secontext'] != wanted_secontext): + attrib['current_secontext'] != wanted_secontext): errors.append("SELinux context for path %s is incorrect. 
" "Current context is %s but should be %s" % (path, attrib['current_secontext'], diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/Device.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/Device.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/Device.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/Device.py 2017-01-10 19:18:17.000000000 +0000 @@ -1,4 +1,4 @@ -""" Handle entries """ +""" Handle entries """ import os import sys @@ -6,14 +6,14 @@ class POSIXDevice(POSIXTool): - """ Handle entries """ + """ Handle entries """ __req__ = ['name', 'dev_type', 'mode', 'owner', 'group'] def fully_specified(self, entry): if entry.get('dev_type') in ['block', 'char']: # check if major/minor are properly specified if (entry.get('major') is None or - entry.get('minor') is None): + entry.get('minor') is None): return False return True diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/File.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/File.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/File.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/File.py 2017-01-10 19:18:17.000000000 +0000 @@ -3,11 +3,12 @@ import os import sys import stat -import time import difflib import tempfile +import Bcfg2.Options from Bcfg2.Client.Tools.POSIX.base import POSIXTool from Bcfg2.Compat import unicode, b64encode, b64decode # pylint: disable=W0622 +import Bcfg2.Utils class POSIXFile(POSIXTool): @@ -17,21 +18,6 @@ def fully_specified(self, entry): return entry.text is not None or entry.get('empty', 'false') == 'true' - def _is_string(self, strng, encoding): - """ Returns true if the string contains no ASCII control - characters and can be decoded from the specified encoding. 
""" - for char in strng: - if ord(char) < 9 or ord(char) > 13 and ord(char) < 32: - return False - if not hasattr(strng, "decode"): - # py3k - return True - try: - strng.decode(encoding) - return True - except: # pylint: disable=W0702 - return False - def _get_data(self, entry): """ Get a tuple of (, ) for the given entry """ is_binary = entry.get('encoding', 'ascii') == 'base64' @@ -43,7 +29,7 @@ tempdata = entry.text if isinstance(tempdata, unicode) and unicode != str: try: - tempdata = tempdata.encode(self.setup['encoding']) + tempdata = tempdata.encode(Bcfg2.Options.setup.encoding) except UnicodeEncodeError: err = sys.exc_info()[1] self.logger.error("POSIX: Error encoding file %s: %s" % @@ -56,7 +42,7 @@ if isinstance(tempdata, str) and str != unicode: tempdatasize = len(tempdata) else: - tempdatasize = len(tempdata.encode(self.setup['encoding'])) + tempdatasize = len(tempdata.encode(Bcfg2.Options.setup.encoding)) different = False content = None @@ -78,7 +64,7 @@ content = open(entry.get('name')).read() except UnicodeDecodeError: content = open(entry.get('name'), - encoding=self.setup['encoding']).read() + encoding=Bcfg2.Options.setup.encoding).read() except IOError: self.logger.error("POSIX: Failed to read %s: %s" % (entry.get("name"), sys.exc_info()[1])) @@ -89,7 +75,7 @@ self.logger.debug("POSIX: %s has incorrect contents" % entry.get("name")) self._get_diffs( - entry, interactive=self.setup['interactive'], + entry, interactive=Bcfg2.Options.setup.interactive, sensitive=entry.get('sensitive', 'false').lower() == 'true', is_binary=is_binary, content=content) return POSIXTool.verify(self, entry, modlist) and not different @@ -116,7 +102,7 @@ os.fdopen(newfd, 'w').write(filedata) else: os.fdopen(newfd, 'wb').write( - filedata.encode(self.setup['encoding'])) + filedata.encode(Bcfg2.Options.setup.encoding)) except (OSError, IOError): err = sys.exc_info()[1] self.logger.error("POSIX: Failed to open temp file %s for writing " @@ -181,20 +167,20 @@ (entry.get("name"), sys.exc_info()[1])) return False if not is_binary: - is_binary |= not self._is_string(content, self.setup['encoding']) + is_binary |= not Bcfg2.Utils.is_string( + content, Bcfg2.Options.setup.encoding) if is_binary: # don't compute diffs if the file is binary prompt.append('Binary file, no printable diff') attrs['current_bfile'] = b64encode(content) else: + diff = self._diff(content, self._get_data(entry)[0], + filename=entry.get("name")) if interactive: - diff = self._diff(content, self._get_data(entry)[0], - difflib.unified_diff, - filename=entry.get("name")) if diff: - udiff = '\n'.join(l.rstrip('\n') for l in diff) + udiff = '\n'.join(diff) if hasattr(udiff, "decode"): - udiff = udiff.decode(self.setup['encoding']) + udiff = udiff.decode(Bcfg2.Options.setup.encoding) try: prompt.append(udiff) except UnicodeEncodeError: @@ -207,8 +193,6 @@ prompt.append("Diff took too long to compute, no " "printable diff") if not sensitive: - diff = self._diff(content, self._get_data(entry)[0], - difflib.ndiff, filename=entry.get("name")) if diff: attrs["current_bdiff"] = b64encode("\n".join(diff)) else: @@ -219,28 +203,16 @@ for attr, val in attrs.items(): entry.set(attr, val) - def _diff(self, content1, content2, difffunc, filename=None): - """ Return a diff of the two strings, as produced by difffunc. - warns after 5 seconds and times out after 30 seconds. 
""" - rv = [] - start = time.time() - longtime = False - for diffline in difffunc(content1.split('\n'), - content2.split('\n')): - now = time.time() - rv.append(diffline) - if now - start > 5 and not longtime: - if filename: - self.logger.info("POSIX: Diff of %s taking a long time" % - filename) - else: - self.logger.info("POSIX: Diff taking a long time") - longtime = True - elif now - start > 30: - if filename: - self.logger.error("POSIX: Diff of %s took too long; " - "giving up" % filename) - else: - self.logger.error("POSIX: Diff took too long; giving up") - return False - return rv + def _diff(self, content1, content2, filename=None): + """ Return a unified diff of the two strings """ + + if filename: + fromfile = "%s (on disk)" % filename + tofile = "%s (from bcfg2)" % filename + else: + fromfile = "" + tofile = "" + return difflib.unified_diff(content1.split('\n'), + content2.split('\n'), + fromfile=fromfile, + tofile=tofile) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/__init__.py 2017-01-10 19:18:17.000000000 +0000 @@ -4,20 +4,31 @@ import re import sys import shutil -from datetime import datetime +import Bcfg2.Options import Bcfg2.Client.Tools +from datetime import datetime from Bcfg2.Compat import walk_packages from Bcfg2.Client.Tools.POSIX.base import POSIXTool class POSIX(Bcfg2.Client.Tools.Tool): """POSIX File support code.""" - name = 'POSIX' - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) - self.ppath = setup['ppath'] - self.max_copies = setup['max_copies'] + options = Bcfg2.Client.Tools.Tool.options + POSIXTool.options + [ + Bcfg2.Options.PathOption( + cf=('paranoid', 'path'), default='/var/cache/bcfg2', + dest='paranoid_path', + help='Specify path for paranoid file backups'), + Bcfg2.Options.Option( + cf=('paranoid', 'max_copies'), default=1, type=int, + dest='paranoid_copies', + help='Specify the number of paranoid copies you want'), + Bcfg2.Options.BooleanOption( + '-P', '--paranoid', cf=('client', 'paranoid'), + help='Make automatic backups of config files')] + + def __init__(self, config): + Bcfg2.Client.Tools.Tool.__init__(self, config) self._handlers = self._load_handlers() self.logger.debug("POSIX: Handlers loaded: %s" % (", ".join(self._handlers.keys()))) @@ -56,7 +67,7 @@ if POSIXTool in hdlr.__mro__: # figure out what entry type this handler handles etype = hdlr.__name__[5:].lower() - rv[etype] = hdlr(self.logger, self.setup, self.config) + rv[etype] = hdlr(self.config) return rv def canVerify(self, entry): @@ -92,7 +103,7 @@ self.logger.debug("POSIX: Verifying entry %s:%s:%s" % (entry.tag, entry.get("type"), entry.get("name"))) ret = self._handlers[entry.get("type")].verify(entry, modlist) - if self.setup['interactive'] and not ret: + if Bcfg2.Options.setup.interactive and not ret: entry.set('qtext', '%s\nInstall %s %s: (y/N) ' % (entry.get('qtext', ''), @@ -106,35 +117,39 @@ bkupnam + r'_\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}$') # current list of backups for this file try: - bkuplist = [f for f in os.listdir(self.ppath) if - bkup_re.match(f)] + bkuplist = [f + for f in os.listdir(Bcfg2.Options.setup.paranoid_path) + if bkup_re.match(f)] except OSError: err = sys.exc_info()[1] self.logger.error("POSIX: Failed to create backup 
list in %s: %s" % - (self.ppath, err)) + (Bcfg2.Options.setup.paranoid_path, err)) return bkuplist.sort() - while len(bkuplist) >= int(self.max_copies): + while len(bkuplist) >= int(Bcfg2.Options.setup.paranoid_copies): # remove the oldest backup available oldest = bkuplist.pop(0) self.logger.info("POSIX: Removing old backup %s" % oldest) try: - os.remove(os.path.join(self.ppath, oldest)) + os.remove(os.path.join(Bcfg2.Options.setup.paranoid_path, + oldest)) except OSError: err = sys.exc_info()[1] - self.logger.error("POSIX: Failed to remove old backup %s: %s" % - (os.path.join(self.ppath, oldest), err)) + self.logger.error( + "POSIX: Failed to remove old backup %s: %s" % + (os.path.join(Bcfg2.Options.setup.paranoid_path, oldest), + err)) def _paranoid_backup(self, entry): """ Take a backup of the specified entry for paranoid mode """ if (entry.get("paranoid", 'false').lower() == 'true' and - self.setup.get("paranoid", False) and - entry.get('current_exists', 'true') == 'true' and - not os.path.isdir(entry.get("name"))): + Bcfg2.Options.setup.paranoid and + entry.get('current_exists', 'true') == 'true' and + not os.path.isdir(entry.get("name"))): self._prune_old_backups(entry) bkupnam = "%s_%s" % (entry.get('name').replace('/', '_'), datetime.isoformat(datetime.now())) - bfile = os.path.join(self.ppath, bkupnam) + bfile = os.path.join(Bcfg2.Options.setup.paranoid_path, bkupnam) try: shutil.copy(entry.get('name'), bfile) self.logger.info("POSIX: Backup of %s saved to %s" % diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIX/Nonexistent.py 2017-01-10 19:18:17.000000000 +0000 @@ -24,8 +24,8 @@ for struct in self.config.getchildren(): for el in struct.getchildren(): if (el.tag == 'Path' and - el.get('type') != 'nonexistent' and - el.get('name').startswith(ename)): + el.get('type') != 'nonexistent' and + el.get('name').startswith(ename)): self.logger.error('POSIX: Not removing %s. One or ' 'more files in this directory are ' 'specified in your configuration.' 
% diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIXUsers.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIXUsers.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/POSIXUsers.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/POSIXUsers.py 2017-01-10 19:18:17.000000000 +0000 @@ -3,14 +3,38 @@ import pwd import grp +import Bcfg2.Options import Bcfg2.Client.XML import Bcfg2.Client.Tools from Bcfg2.Utils import PackedDigitRange +def uid_range_type(val): + """ Option type to unpack a list of numerical ranges """ + return PackedDigitRange(*Bcfg2.Options.Types.comma_list(val)) + + class POSIXUsers(Bcfg2.Client.Tools.Tool): """ A tool to handle creating users and groups with useradd/mod/del and groupadd/mod/del """ + options = Bcfg2.Client.Tools.Tool.options + [ + Bcfg2.Options.Option( + cf=('POSIXUsers', 'uid_whitelist'), default=[], + type=uid_range_type, + help="UID ranges the POSIXUsers tool will manage"), + Bcfg2.Options.Option( + cf=('POSIXUsers', 'gid_whitelist'), default=[], + type=uid_range_type, + help="GID ranges the POSIXUsers tool will manage"), + Bcfg2.Options.Option( + cf=('POSIXUsers', 'uid_blacklist'), default=[], + type=uid_range_type, + help="UID ranges the POSIXUsers tool will not manage"), + Bcfg2.Options.Option( + cf=('POSIXUsers', 'gid_blacklist'), default=[], + type=uid_range_type, + help="GID ranges the POSIXUsers tool will not manage")] + __execs__ = ['/usr/sbin/useradd', '/usr/sbin/usermod', '/usr/sbin/userdel', '/usr/sbin/groupadd', '/usr/sbin/groupmod', '/usr/sbin/groupdel'] @@ -18,7 +42,6 @@ ('POSIXGroup', None)] __req__ = dict(POSIXUser=['name'], POSIXGroup=['name']) - experimental = True #: A mapping of XML entry attributes to the indexes of #: corresponding values in the get{pw|gr}all data structures @@ -30,25 +53,15 @@ #: user or group id_mapping = dict(POSIXUser="uid", POSIXGroup="gid") - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + def __init__(self, config): + Bcfg2.Client.Tools.Tool.__init__(self, config) self.set_defaults = dict(POSIXUser=self.populate_user_entry, POSIXGroup=lambda g: g) self._existing = None - self._whitelist = dict(POSIXUser=None, POSIXGroup=None) - self._blacklist = dict(POSIXUser=None, POSIXGroup=None) - if self.setup['posix_uid_whitelist']: - self._whitelist['POSIXUser'] = \ - PackedDigitRange(*self.setup['posix_uid_whitelist']) - else: - self._blacklist['POSIXUser'] = \ - PackedDigitRange(*self.setup['posix_uid_blacklist']) - if self.setup['posix_gid_whitelist']: - self._whitelist['POSIXGroup'] = \ - PackedDigitRange(*self.setup['posix_gid_whitelist']) - else: - self._blacklist['POSIXGroup'] = \ - PackedDigitRange(*self.setup['posix_gid_blacklist']) + self._whitelist = dict(POSIXUser=Bcfg2.Options.setup.uid_whitelist, + POSIXGroup=Bcfg2.Options.setup.gid_whitelist) + self._blacklist = dict(POSIXUser=Bcfg2.Options.setup.uid_blacklist, + POSIXGroup=Bcfg2.Options.setup.gid_blacklist) @property def existing(self): @@ -66,7 +79,7 @@ defined, and the uid/gid is in that whitelist; or b) no whitelist is defined, and the uid/gid is not in the blacklist. 
""" - if self._whitelist[tag] is None: + if not self._whitelist[tag]: return eid not in self._blacklist[tag] else: return eid in self._whitelist[tag] @@ -87,7 +100,7 @@ return False return True - def Inventory(self, states, structures=None): + def Inventory(self, structures=None): if not structures: structures = self.config.getchildren() # we calculate a list of all POSIXUser and POSIXGroup entries, @@ -107,7 +120,8 @@ (group, entry.get("name"))) struct.append(Bcfg2.Client.XML.Element("POSIXGroup", name=group)) - return Bcfg2.Client.Tools.Tool.Inventory(self, states, structures) + return Bcfg2.Client.Tools.Tool.Inventory(self, structures) + Inventory.__doc__ = Bcfg2.Client.Tools.Tool.Inventory.__doc__ def FindExtra(self): extra = [] @@ -146,8 +160,8 @@ """ Get a list of supplmentary groups that the user in the given entry is a member of """ return [g for g in self.existing['POSIXGroup'].values() - if entry.get("name") in g[3] - and self._in_managed_range('POSIXGroup', g[2])] + if entry.get("name") in g[3] and + self._in_managed_range('POSIXGroup', g[2])] def VerifyPOSIXUser(self, entry, _): """ Verify a POSIXUser entry """ @@ -165,7 +179,7 @@ % (entry.tag, entry.get("name"), actual, expected)])) rv = False - if self.setup['interactive'] and not rv: + if Bcfg2.Options.setup.interactive and not rv: entry.set('qtext', '%s\nInstall %s %s: (y/N) ' % (entry.get('qtext', ''), entry.tag, entry.get('name'))) @@ -174,7 +188,7 @@ def VerifyPOSIXGroup(self, entry, _): """ Verify a POSIXGroup entry """ rv = self._verify(entry) - if self.setup['interactive'] and not rv: + if Bcfg2.Options.setup.interactive and not rv: entry.set('qtext', '%s\nInstall %s %s: (y/N) ' % (entry.get('qtext', ''), entry.tag, entry.get('name'))) @@ -191,7 +205,7 @@ for attr, idx in self.attr_mapping[entry.tag].items(): val = str(self.existing[entry.tag][entry.get("name")][idx]) entry.set("current_%s" % - attr, val.decode(self.setup['encoding'])) + attr, val.decode(Bcfg2.Options.setup.encoding)) if attr in ["uid", "gid"]: if entry.get(attr) is None: # no uid/gid specified, so we let the tool @@ -213,7 +227,8 @@ entry.set('qtext', "\n".join([entry.get('qtext', '')] + errors)) return len(errors) == 0 - def Install(self, entries, states): + def Install(self, entries): + states = dict() for entry in entries: # install groups first, so that all groups exist for # users that might need them @@ -223,6 +238,7 @@ if entry.tag == 'POSIXUser': states[entry] = self._install(entry) self._existing = None + return states def _install(self, entry): """ add or modify a user or group using the appropriate command """ diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/RcUpdate.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/RcUpdate.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/RcUpdate.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/RcUpdate.py 2017-01-10 19:18:17.000000000 +0000 @@ -98,10 +98,10 @@ # make sure service is disabled on boot bootcmd = '/sbin/rc-update del %s default' bootcmdrv = self.cmd.run(bootcmd % entry.get('name')).success - if self.setup['servicemode'] == 'disabled': + if Bcfg2.Options.setup.service_mode == 'disabled': # 'disabled' means we don't attempt to modify running svcs return bootcmdrv - buildmode = self.setup['servicemode'] == 'build' + buildmode = Bcfg2.Options.setup.service_mode == 'build' if (entry.get('status') == 'on' and not buildmode) and \ entry.get('current_status') == 'off': svccmdrv = self.start_service(entry) diff -Nru 
bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/RPMng.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/RPMng.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/RPMng.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/RPMng.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -""" RPM driver called 'RPMng' for backwards compat """ - -from Bcfg2.Client.Tools.RPM import RPM - - -class RPMng(RPM): - """ RPM driver called 'RPMng' for backwards compat """ - deprecated = True - name = "RPM" diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/RPM.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/RPM.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/RPM.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/RPM.py 2017-01-10 19:18:17.000000000 +0000 @@ -1,12 +1,1140 @@ """Bcfg2 Support for RPMS""" -import os.path +import os import rpm -import rpmtools import Bcfg2.Client.Tools +import grp +import optparse +import pwd +import stat +import sys +try: + import hashlib + py24compat = False +except ImportError: + # FIXME: Remove when client python dep is 2.5 or greater + py24compat = True + import md5 + +# Determine what prelink tools we have available. +# The isprelink module is a python extension that examines the ELF headers +# to see if the file has been prelinked. If it is not present a lot of files +# are unnecessarily run through the prelink command. +try: + from isprelink import * + isprelink_imported = True +except ImportError: + isprelink_imported = False + +# If the prelink command is installed on the system then we need to do +# prelink -y on files. +if os.access('/usr/sbin/prelink', os.X_OK): + prelink_exists = True +else: + prelink_exists = False + +# If we don't have isprelink then we will use the prelink configuration file to +# filter what we have to put through prelink -y. +import re +blacklist = [] +whitelist = [] +try: + f = open('/etc/prelink.conf', mode='r') + for line in f: + if line.startswith('#'): + continue + option, pattern = line.split() + if pattern.startswith('*.'): + pattern = pattern.replace('*.', '\.') + pattern += '$' + elif pattern.startswith('/'): + pattern = '^' + pattern + if option == '-b': + blacklist.append(pattern) + elif option == '-l': + whitelist.append(pattern) + f.close() +except IOError: + pass + +blacklist_re = re.compile('|'.join(blacklist)) +whitelist_re = re.compile('|'.join(whitelist)) + +# Flags that are not defined in rpm-python. +# They are defined in lib/rpmcli.h +# Bit(s) for verifyFile() attributes. +# +RPMVERIFY_NONE = 0 +RPMVERIFY_MD5 = 1 # 1 << 0 # from %verify(md5) +RPMVERIFY_FILESIZE = 2 # 1 << 1 # from %verify(size) +RPMVERIFY_LINKTO = 4 # 1 << 2 # from %verify(link) +RPMVERIFY_USER = 8 # 1 << 3 # from %verify(user) +RPMVERIFY_GROUP = 16 # 1 << 4 # from %verify(group) +RPMVERIFY_MTIME = 32 # 1 << 5 # from %verify(mtime) +RPMVERIFY_MODE = 64 # 1 << 6 # from %verify(mode) +RPMVERIFY_RDEV = 128 # 1 << 7 # from %verify(rdev) +RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # from --nocontexts +RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # readlink failed +RPMVERIFY_READFAIL = 536870912 # (1 << 29) # file read failed +RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # lstat failed +RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # lgetfilecon failed + +RPMVERIFY_FAILURES = \ + (RPMVERIFY_LSTATFAIL | RPMVERIFY_READFAIL | + RPMVERIFY_READLINKFAIL | RPMVERIFY_LGETFILECONFAIL) + +# Bit(s) to control rpm_verify() operation. 
+# +VERIFY_DEFAULT = 0, # /*!< */ +VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */ +VERIFY_SIZE = 1 << 1 # /*!< from --nosize */ +VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */ +VERIFY_USER = 1 << 3 # /*!< from --nouser */ +VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */ +VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */ +VERIFY_MODE = 1 << 6 # /*!< from --nomode */ +VERIFY_RDEV = 1 << 7 # /*!< from --nodev */ +# /* bits 8-14 unused, reserved for rpmVerifyAttrs */ +VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */ +VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */ +VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */ +VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */ +VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */ +VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */ +VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */ +VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */ +VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */ +VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */ +VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */ +VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */ +VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */ +# /* bits 28-31 used in rpmVerifyAttrs */ + +# Comes from C source. lib/rpmcli.h +VERIFY_ATTRS = \ + (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | + VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS) + +VERIFY_ALL = \ + (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | + VERIFY_DIGEST | VERIFY_SIGNATURE | VERIFY_HDRCHK) + + +# Some masks for what checks to NOT do on these file types. +# The C code actually resets these up for every file. +DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | + RPMVERIFY_LINKTO) + +# These file types all have the same mask, but hopefully this will make the +# code more readable. +FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS + +LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | + RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP) + +REG_FLAGS = ~(RPMVERIFY_LINKTO) + + +def s_isdev(mode): + """ + Check to see if a file is a device. + + """ + return stat.S_ISBLK(mode) | stat.S_ISCHR(mode) + + +def rpmpackagelist(rts): + """ + Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver. + Requires rpmtransactionset() to be run first to get a ts. + Returns a list of pkgspec dicts. + + e.g. [{'name':'foo', 'epoch':'20', 'version':'1.2', + 'release':'5', 'arch':'x86_64' }, + {'name':'bar', 'epoch':'10', 'version':'5.2', + 'release':'2', 'arch':'x86_64' }] + + """ + return [ + {'name': header[rpm.RPMTAG_NAME], + 'epoch': header[rpm.RPMTAG_EPOCH], + 'version': header[rpm.RPMTAG_VERSION], + 'release': header[rpm.RPMTAG_RELEASE], + 'arch': header[rpm.RPMTAG_ARCH], + 'gpgkeyid': + header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]} + for header in rts.dbMatch()] + + +def getindexbykeyword(index_ts, **kwargs): + """ + Return list of indexes from the rpmdb matching keywords + ex: getHeadersByKeyword(name='foo', version='1', release='1') + + Can be passed any structure that can be indexed by the pkgspec + keywords as other keys are filtered out. 
+ + """ + lst = [] + name = kwargs.get('name') + if name: + index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name) + else: + index_mi = index_ts.dbMatch() + + if 'epoch' in kwargs: + if kwargs['epoch'] is not None and kwargs['epoch'] != 'None': + kwargs['epoch'] = int(kwargs['epoch']) + else: + del(kwargs['epoch']) + + keywords = [key for key in list(kwargs.keys()) + if key in ('name', 'epoch', 'version', 'release', 'arch')] + keywords_len = len(keywords) + for hdr in index_mi: + match = 0 + for keyword in keywords: + if hdr[keyword] == kwargs[keyword]: + match += 1 + if match == keywords_len: + lst.append(index_mi.instance()) + del index_mi + return lst + + +def getheadersbykeyword(header_ts, **kwargs): + """ + Borrowed parts of this from from Yum. Need to fix it though. + Epoch is not handled right. + + Return list of headers from the rpmdb matching keywords + ex: getHeadersByKeyword(name='foo', version='1', release='1') + + Can be passed any structure that can be indexed by the pkgspec + keyswords as other keys are filtered out. + + """ + lst = [] + name = kwargs.get('name') + if name: + header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name) + else: + header_mi = header_ts.dbMatch() + + if 'epoch' in kwargs: + if kwargs['epoch'] is not None and kwargs['epoch'] != 'None': + kwargs['epoch'] = int(kwargs['epoch']) + else: + del(kwargs['epoch']) + + keywords = [key for key in list(kwargs.keys()) + if key in ('name', 'epoch', 'version', 'release', 'arch')] + keywords_len = len(keywords) + for hdr in header_mi: + match = 0 + for keyword in keywords: + if hdr[keyword] == kwargs[keyword]: + match += 1 + if match == keywords_len: + lst.append(hdr) + del header_mi + return lst + + +def prelink_md5_check(filename): + """ + Checks if a file is prelinked. If it is run it through prelink -y + to get the unprelinked md5 and file size. + + Return 0 if the file was not prelinked, otherwise return the file size. + Always return the md5. + + """ + prelink = False + try: + plf = open(filename, "rb") + except IOError: + return False, 0 + + if prelink_exists: + if isprelink_imported: + plfd = plf.fileno() + if isprelink(plfd): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + prelink = True + elif (whitelist_re.search(filename) and not + blacklist_re.search(filename)): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + prelink = True + + fsize = 0 + if py24compat: + chksum = md5.new() + else: + chksum = hashlib.md5() + while 1: + data = plf.read() + if not data: + break + fsize += len(data) + chksum.update(data) + plf.close() + file_md5 = chksum.hexdigest() + if prelink: + return file_md5, fsize + else: + return file_md5, 0 + + +def prelink_size_check(filename): + """ + This check is only done if the prelink_md5_check() is not done first. + + Checks if a file is prelinked. If it is run it through prelink -y + to get the unprelinked file size. + + Return 0 if the file was not prelinked, otherwise return the file size. 
+ + """ + fsize = 0 + try: + plf = open(filename, "rb") + except IOError: + return False + + if prelink_exists: + if isprelink_imported: + plfd = plf.fileno() + if isprelink(plfd): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + + while 1: + data = plf.read() + if not data: + break + fsize += len(data) + + elif (whitelist_re.search(filename) and not + blacklist_re.search(filename)): + plf.close() + cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ + % (re.escape(filename)) + plf = os.popen(cmd, 'rb') + + while 1: + data = plf.read() + if not data: + break + fsize += len(data) + + plf.close() + + return fsize + + +def debug_verify_flags(vflags): + """ + Decodes the verify flags bits. + """ + if vflags & RPMVERIFY_MD5: + print('RPMVERIFY_MD5') + if vflags & RPMVERIFY_FILESIZE: + print('RPMVERIFY_FILESIZE') + if vflags & RPMVERIFY_LINKTO: + print('RPMVERIFY_LINKTO') + if vflags & RPMVERIFY_USER: + print('RPMVERIFY_USER') + if vflags & RPMVERIFY_GROUP: + print('RPMVERIFY_GROUP') + if vflags & RPMVERIFY_MTIME: + print('RPMVERIFY_MTIME') + if vflags & RPMVERIFY_MODE: + print('RPMVERIFY_MODE') + if vflags & RPMVERIFY_RDEV: + print('RPMVERIFY_RDEV') + if vflags & RPMVERIFY_CONTEXTS: + print('RPMVERIFY_CONTEXTS') + if vflags & RPMVERIFY_READLINKFAIL: + print('RPMVERIFY_READLINKFAIL') + if vflags & RPMVERIFY_READFAIL: + print('RPMVERIFY_READFAIL') + if vflags & RPMVERIFY_LSTATFAIL: + print('RPMVERIFY_LSTATFAIL') + if vflags & RPMVERIFY_LGETFILECONFAIL: + print('RPMVERIFY_LGETFILECONFAIL') + + +def debug_file_flags(fflags): + """ + Decodes the file flags bits. + """ + if fflags & rpm.RPMFILE_CONFIG: + print('rpm.RPMFILE_CONFIG') + + if fflags & rpm.RPMFILE_DOC: + print('rpm.RPMFILE_DOC') + + if fflags & rpm.RPMFILE_ICON: + print('rpm.RPMFILE_ICON') + + if fflags & rpm.RPMFILE_MISSINGOK: + print('rpm.RPMFILE_MISSINGOK') + + if fflags & rpm.RPMFILE_NOREPLACE: + print('rpm.RPMFILE_NOREPLACE') + + if fflags & rpm.RPMFILE_GHOST: + print('rpm.RPMFILE_GHOST') + + if fflags & rpm.RPMFILE_LICENSE: + print('rpm.RPMFILE_LICENSE') + + if fflags & rpm.RPMFILE_README: + print('rpm.RPMFILE_README') + + if fflags & rpm.RPMFILE_EXCLUDE: + print('rpm.RPMFILE_EXLUDE') + + if fflags & rpm.RPMFILE_UNPATCHED: + print('rpm.RPMFILE_UNPATCHED') + + if fflags & rpm.RPMFILE_PUBKEY: + print('rpm.RPMFILE_PUBKEY') + + +def rpm_verify_file(fileinfo, rpmlinktos, omitmask): + """ + Verify all the files in a package. + + Returns a list of error flags, the file type and file name. The list + entries are strings that are the same as the labels for the bitwise + flags used in the C code. + + """ + (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, + vflags, fuser, fgroup, fmd5) = fileinfo + + # 1. rpmtsRootDir stuff. What does it do and where to I get it from? + + file_results = [] + flags = vflags + + # Check to see if the file was installed - if not pretend all is ok. + # This is what the rpm C code does! + if fstate != rpm.RPMFILE_STATE_NORMAL: + return file_results + + # Get the installed files stats + try: + lstat = os.lstat(fname) + except OSError: + if not (fflags & (rpm.RPMFILE_MISSINGOK | rpm.RPMFILE_GHOST)): + file_results.append('RPMVERIFY_LSTATFAIL') + #file_results.append(fname) + return file_results + + # 5. Contexts? SELinux stuff? + + # Setup what checks to do. This is straight out of the C code. 
+ if stat.S_ISDIR(lstat.st_mode): + flags &= DIR_FLAGS + elif stat.S_ISLNK(lstat.st_mode): + flags &= LINK_FLAGS + elif stat.S_ISFIFO(lstat.st_mode): + flags &= FIFO_FLAGS + elif stat.S_ISCHR(lstat.st_mode): + flags &= CHR_FLAGS + elif stat.S_ISBLK(lstat.st_mode): + flags &= BLK_FLAGS + else: + flags &= REG_FLAGS + + if (fflags & rpm.RPMFILE_GHOST): + flags &= GHOST_FLAGS + + flags &= ~(omitmask | RPMVERIFY_FAILURES) + + # 8. SELinux stuff. + + prelink_size = 0 + if flags & RPMVERIFY_MD5: + prelink_md5, prelink_size = prelink_md5_check(fname) + if prelink_md5 is False: + file_results.append('RPMVERIFY_MD5') + file_results.append('RPMVERIFY_READFAIL') + elif prelink_md5 != fmd5: + file_results.append('RPMVERIFY_MD5') + + if flags & RPMVERIFY_LINKTO: + linkto = os.readlink(fname) + if not linkto: + file_results.append('RPMVERIFY_READLINKFAIL') + file_results.append('RPMVERIFY_LINKTO') + else: + if len(rpmlinktos) == 0 or linkto != rpmlinktos: + file_results.append('RPMVERIFY_LINKTO') + + if flags & RPMVERIFY_FILESIZE: + if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done. + prelink_size = prelink_size_check(fname) + if (prelink_size != 0): # This is a prelinked file. + if (prelink_size != fsize): + file_results.append('RPMVERIFY_FILESIZE') + elif lstat.st_size != fsize: # It wasn't a prelinked file. + file_results.append('RPMVERIFY_FILESIZE') + + if flags & RPMVERIFY_MODE: + metamode = fmode + filemode = lstat.st_mode + + # Comparing the type of %ghost files is meaningless, but perms are ok. + if fflags & rpm.RPMFILE_GHOST: + metamode &= ~0xf000 + filemode &= ~0xf000 + + if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \ + (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)): + file_results.append('RPMVERIFY_MODE') + + if flags & RPMVERIFY_RDEV: + if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or + stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)): + file_results.append('RPMVERIFY_RDEV') + elif (s_isdev(fmode) & s_isdev(lstat.st_mode)): + st_rdev = lstat.st_rdev + if frdev != st_rdev: + file_results.append('RPMVERIFY_RDEV') + + if flags & RPMVERIFY_MTIME: + if lstat.st_mtime != fmtime: + file_results.append('RPMVERIFY_MTIME') + + if flags & RPMVERIFY_USER: + try: + user = pwd.getpwuid(lstat.st_uid)[0] + except KeyError: + user = None + if not user or not fuser or (user != fuser): + file_results.append('RPMVERIFY_USER') + + if flags & RPMVERIFY_GROUP: + try: + group = grp.getgrgid(lstat.st_gid)[0] + except KeyError: + group = None + if not group or not fgroup or (group != fgroup): + file_results.append('RPMVERIFY_GROUP') + + return file_results + + +def rpm_verify_dependencies(header): + """ + Check package dependencies. Header is an rpm.hdr. + + Don't like opening another ts to do this, but + it was the only way I could find of clearing the ts + out. + + Have asked on the rpm-maint list on how to do + this the right way (28 Feb 2007). + + ts.check() returns: + + ((name, version, release), (reqname, reqversion), \ + flags, suggest, sense) + + """ + _ts1 = rpmtransactionset() + _ts1.addInstall(header, 'Dep Check', 'i') + dep_errors = _ts1.check() + _ts1.closeDB() + return dep_errors + + +def rpm_verify_package(vp_ts, header, verify_options): + """ + Verify a single package specified by header. Header is an rpm.hdr. + + If errors are found it returns a dictionary of errors. + + """ + # Set some transaction level flags. 
+ vsflags = 0 + if 'nodigest' in verify_options: + vsflags |= rpm._RPMVSF_NODIGESTS + if 'nosignature' in verify_options: + vsflags |= rpm._RPMVSF_NOSIGNATURES + ovsflags = vp_ts.setVSFlags(vsflags) + + # Map from the Python options to the rpm bitwise flags. + omitmask = 0 + + if 'nolinkto' in verify_options: + omitmask |= VERIFY_LINKTO + if 'nomd5' in verify_options: + omitmask |= VERIFY_MD5 + if 'nosize' in verify_options: + omitmask |= VERIFY_SIZE + if 'nouser' in verify_options: + omitmask |= VERIFY_USER + if 'nogroup' in verify_options: + omitmask |= VERIFY_GROUP + if 'nomtime' in verify_options: + omitmask |= VERIFY_MTIME + if 'nomode' in verify_options: + omitmask |= VERIFY_MODE + if 'nordev' in verify_options: + omitmask |= VERIFY_RDEV + + omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS) + + package_results = {} + + # Check Signatures and Digests. + # No idea what this might return. Need to break something to see. + # Setting the vsflags above determines what gets checked in the header. + hdr_stat = vp_ts.hdrCheck(header.unload()) + if hdr_stat: + package_results['hdr'] = hdr_stat + + # Check Package Depencies. + if 'nodeps' not in verify_options: + dep_stat = rpm_verify_dependencies(header) + if dep_stat: + package_results['deps'] = dep_stat + + # Check all the package files. + if 'nofiles' not in verify_options: + vp_fi = header.fiFromHeader() + for fileinfo in vp_fi: + # Do not bother doing anything with ghost files. + # This is what RPM does. + if fileinfo[4] & rpm.RPMFILE_GHOST: + continue + + # This is only needed because of an inconsistency in the + # rpm.fi interface. + linktos = vp_fi.FLink() + + file_stat = rpm_verify_file(fileinfo, linktos, omitmask) + + #if len(file_stat) > 0 or options.verbose: + if len(file_stat) > 0: + fflags = fileinfo[4] + if fflags & rpm.RPMFILE_CONFIG: + file_stat.append('c') + elif fflags & rpm.RPMFILE_DOC: + file_stat.append('d') + elif fflags & rpm.RPMFILE_GHOST: + file_stat.append('g') + elif fflags & rpm.RPMFILE_LICENSE: + file_stat.append('l') + elif fflags & rpm.RPMFILE_PUBKEY: + file_stat.append('P') + elif fflags & rpm.RPMFILE_README: + file_stat.append('r') + else: + file_stat.append(' ') + + file_stat.append(fileinfo[0]) # The filename. + package_results.setdefault('files', []).append(file_stat) + + # Run the verify script if there is one. + # Do we want this? + #if 'noscripts' not in verify_options: + # script_stat = rpmVerifyscript() + # if script_stat: + # package_results['script'] = script_stat + + # If there have been any errors, add the package nevra to the result. + if len(package_results) > 0: + package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], + header[rpm.RPMTAG_EPOCH], + header[rpm.RPMTAG_VERSION], + header[rpm.RPMTAG_RELEASE], + header[rpm.RPMTAG_ARCH])) + else: + package_results = None + + # Put things back the way we found them. + vsflags = vp_ts.setVSFlags(ovsflags) + + return package_results + + +def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]): + """ + Requires rpmtransactionset() to be run first to get a ts. + + pkgspec is a dict specifying the package + e.g.: + For a single package + { name='foo', epoch='20', version='1', release='1', arch='x86_64'} + + For all packages + {} + + Or any combination of keywords to select one or more packages to verify. + + options is a list of 'rpm --verify' options. + Default is to check everything. 
+ e.g.: + [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature', + 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime', + 'nomode', 'nordev' ] + + Returns a list. One list entry per package. Each list entry is a + dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'. + Entries only get added for the failures. If nothing failed, None is + returned. + + Its all a bit messy and probably needs reviewing. + + [ { 'hdr': [???], + 'deps: [((name, version, release), (reqname, reqversion), + flags, suggest, sense), .... ] + 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ], + ['filename2', 'RPMVERFIY_LSTATFAIL']] + 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] } + { 'hdr': [???], + 'deps: [((name, version, release), (reqname, reqversion), + flags, suggest, sense), .... ] + 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER" ], + ['filename2', 'RPMVERFIY_LSTATFAIL']] + 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ] + + """ + verify_results = [] + headers = getheadersbykeyword(verify_ts, **verify_pkgspec) + for header in headers: + result = rpm_verify_package(verify_ts, header, verify_options) + if result: + verify_results.append(result) + + return verify_results + + +def rpmtransactionset(): + """ + A simple wrapper for rpm.TransactionSet() to keep everthiing together. + Might use it to set some ts level flags later. + + """ + ts = rpm.TransactionSet() + return ts + + +class Rpmtscallback(object): + """ + Callback for ts.run(). Used for adding, upgrading and removing packages. + Starting with all possible reasons codes, but bcfg2 will probably only + make use of a few of them. + + Mostly just printing stuff at the moment to understand how the callback + is used. + """ + def __init__(self): + self.fdnos = {} + + def callback(self, reason, amount, total, key, client_data): + """ + Generic rpmts call back. + """ + if reason == rpm.RPMCALLBACK_INST_OPEN_FILE: + pass + elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE: + pass + elif reason == rpm.RPMCALLBACK_INST_START: + pass + elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \ + reason == rpm.RPMCALLBACK_INST_PROGRESS: + pass + # rpm.RPMCALLBACK_INST_PROGRESS' + elif reason == rpm.RPMCALLBACK_TRANS_START: + pass + elif reason == rpm.RPMCALLBACK_TRANS_STOP: + pass + elif reason == rpm.RPMCALLBACK_REPACKAGE_START: + pass + elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS: + pass + elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP: + pass + elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS: + pass + elif reason == rpm.RPMCALLBACK_UNINST_START: + pass + elif reason == rpm.RPMCALLBACK_UNINST_STOP: + pass + # How do we get at this? 
+ # RPM.modified += key + elif reason == rpm.RPMCALLBACK_UNPACK_ERROR: + pass + elif reason == rpm.RPMCALLBACK_CPIO_ERROR: + pass + elif reason == rpm.RPMCALLBACK_UNKNOWN: + pass + else: + print('ERROR - Fell through callBack') + + +def rpm_erase(erase_pkgspecs, erase_flags): + """ + pkgspecs is a list of pkgspec dicts specifying packages + e.g.: + For a single package + { name='foo', epoch='20', version='1', release='1', arch='x86_64'} + + """ + erase_ts_flags = 0 + if 'noscripts' in erase_flags: + erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS + if 'notriggers' in erase_flags: + erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS + if 'repackage' in erase_flags: + erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE + + erase_ts = rpmtransactionset() + erase_ts.setFlags(erase_ts_flags) + + for pkgspec in erase_pkgspecs: + idx_list = getindexbykeyword(erase_ts, **pkgspec) + if len(idx_list) > 1 and not 'allmatches' in erase_flags: + #pass + print('ERROR - Multiple package match for erase', pkgspec) + else: + for idx in idx_list: + erase_ts.addErase(idx) + + #for te in erase_ts: + + erase_problems = [] + if 'nodeps' not in erase_flags: + erase_problems = erase_ts.check() + + if erase_problems == []: + erase_ts.order() + erase_callback = Rpmtscallback() + erase_ts.run(erase_callback.callback, 'Erase') + #else: + + erase_ts.closeDB() + del erase_ts + return erase_problems + + +def display_verify_file(file_results): + ''' + Display file results similar to rpm --verify. + ''' + filename = file_results[-1] + filetype = file_results[-2] + + result_string = '' + + if 'RPMVERIFY_LSTATFAIL' in file_results: + result_string = 'missing ' + else: + if 'RPMVERIFY_FILESIZE' in file_results: + result_string = result_string + 'S' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_MODE' in file_results: + result_string = result_string + 'M' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_MD5' in file_results: + if 'RPMVERIFY_READFAIL' in file_results: + result_string = result_string + '?' + else: + result_string = result_string + '5' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_RDEV' in file_results: + result_string = result_string + 'D' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_LINKTO' in file_results: + if 'RPMVERIFY_READLINKFAIL' in file_results: + result_string = result_string + '?' + else: + result_string = result_string + 'L' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_USER' in file_results: + result_string = result_string + 'U' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_GROUP' in file_results: + result_string = result_string + 'G' + else: + result_string = result_string + '.' + + if 'RPMVERIFY_MTIME' in file_results: + result_string = result_string + 'T' + else: + result_string = result_string + '.' + + print(result_string + ' ' + filetype + ' ' + filename) + sys.stdout.flush() + +#============================================================================= +# Some options and output to assist with development and testing. +# These are not intended for normal use. +if __name__ == "__main__": + + p = optparse.OptionParser() + + p.add_option('--name', action='store', + default=None, + help='''Package name to verify. + + ****************************************** + NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES. 
+ ****************************************** + + The specified operation will be carried out on all + instances of packages that match the package + specification + (name, epoch, version, release, arch).''') + + p.add_option('--epoch', action='store', + default=None, + help='''Package epoch.''') + + p.add_option('--version', action='store', + default=None, + help='''Package version.''') + + p.add_option('--release', action='store', + default=None, + help='''Package release.''') + + p.add_option('--arch', action='store', + default=None, + help='''Package arch.''') + + p.add_option('--erase', '-e', action='store_true', + default=None, + help= + '''**************************************************** + REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE + PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT + GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED + INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED + DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN + ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED. + ****************************************************''') + + p.add_option('--list', '-l', action='store_true', + help='''List package identity info. rpm -qa ish equivalent + intended for use in RefreshPackages().''') + + p.add_option('--verify', action='store_true', + help='''Verify Package(s). Output is only produced after all + packages has been verified. Be patient.''') + + p.add_option('--verbose', '-v', action='store_true', + help='''Verbose output for --verify option. Output is the + same as rpm -v --verify.''') + + p.add_option('--nodeps', action='store_true', + default=False, + help='Do not do dependency testing.') + + p.add_option('--nodigest', action='store_true', + help='Do not check package digests.') + + p.add_option('--nofiles', action='store_true', + help='Do not do file checks.') + + p.add_option('--noscripts', action='store_true', + help='Do not run verification scripts.') + + p.add_option('--nosignature', action='store_true', + help='Do not do package signature verification.') + + p.add_option('--nolinkto', action='store_true', + help='Do not do symlink tests.') + + p.add_option('--nomd5', action='store_true', + help='''Do not do MD5 checksums on files. Note that this does + not work for prelink files yet.''') + + p.add_option('--nosize', action='store_true', + help='''Do not do file size tests. Note that this does not + work for prelink files yet.''') + + p.add_option('--nouser', action='store_true', + help='Do not check file user ownership.') + + p.add_option('--nogroup', action='store_true', + help='Do not check file group ownership.') + + p.add_option('--nomtime', action='store_true', + help='Do not check file modification times.') + + p.add_option('--nomode', action='store_true', + help='Do not check file modes (permissions).') + + p.add_option('--nordev', action='store_true', + help='Do not check device node.') + + p.add_option('--notriggers', action='store_true', + help='Do not do not generate triggers on erase.') + + p.add_option('--repackage', action='store_true', + help='''Do repackage on erase.i Packages are put + in /var/spool/repackage.''') + + p.add_option('--allmatches', action='store_true', + help= + '''Remove all package instances that match the + pkgspec. + + *************************************************** + NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC + THAT MEANS ALL PACKAGES!!!! 
+ ***************************************************''') + + options, arguments = p.parse_args() + + pkgspec = {} + rpm_options = [] + + if options.nodeps: + rpm_options.append('nodeps') + + if options.nodigest: + rpm_options.append('nodigest') + + if options.nofiles: + rpm_options.append('nofiles') + + if options.noscripts: + rpm_options.append('noscripts') + + if options.nosignature: + rpm_options.append('nosignature') + + if options.nolinkto: + rpm_options.append('nolinkto') + + if options.nomd5: + rpm_options.append('nomd5') + + if options.nosize: + rpm_options.append('nosize') + + if options.nouser: + rpm_options.append('nouser') + + if options.nogroup: + rpm_options.append('nogroup') + + if options.nomtime: + rpm_options.append('nomtime') + + if options.nomode: + rpm_options.append('nomode') + + if options.nordev: + rpm_options.append('nordev') + + if options.repackage: + rpm_options.append('repackage') + + if options.allmatches: + rpm_options.append('allmatches') + + main_ts = rpmtransactionset() + + cmdline_pkgspec = {} + if options.name != 'all': + if options.name: + cmdline_pkgspec['name'] = str(options.name) + if options.epoch: + cmdline_pkgspec['epoch'] = str(options.epoch) + if options.version: + cmdline_pkgspec['version'] = str(options.version) + if options.release: + cmdline_pkgspec['release'] = str(options.release) + if options.arch: + cmdline_pkgspec['arch'] = str(options.arch) + + if options.verify: + results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options) + for r in results: + files = r.get('files', '') + for f in files: + display_verify_file(f) + + elif options.list: + for p in rpmpackagelist(main_ts): + print(p) + + elif options.erase: + if options.name: + rpm_erase([cmdline_pkgspec], rpm_options) + else: + print('You must specify the "--name" option') + class RPM(Bcfg2.Client.Tools.PkgTool): """Support for RPM packages.""" + options = Bcfg2.Client.Tools.PkgTool.options + [ + Bcfg2.Options.Option( + cf=('RPM', 'installonlypackages'), dest="rpm_installonly", + type=Bcfg2.Options.Types.comma_list, + default=['kernel', 'kernel-bigmem', 'kernel-enterprise', + 'kernel-smp', 'kernel-modules', 'kernel-debug', + 'kernel-unsupported', 'kernel-devel', 'kernel-source', + 'kernel-default', 'kernel-largesmp-devel', + 'kernel-largesmp', 'kernel-xen', 'gpg-pubkey'], + help='RPM install-only packages'), + Bcfg2.Options.BooleanOption( + cf=('RPM', 'pkg_checks'), default=True, dest="rpm_pkg_checks", + help="Perform RPM package checks"), + Bcfg2.Options.BooleanOption( + cf=('RPM', 'pkg_verify'), default=True, dest="rpm_pkg_verify", + help="Perform RPM package verify"), + Bcfg2.Options.BooleanOption( + cf=('RPM', 'install_missing'), default=True, + dest="rpm_install_missing", + help="Install missing packages"), + Bcfg2.Options.Option( + cf=('RPM', 'erase_flags'), default=["allmatches"], + dest="rpm_erase_flags", type=Bcfg2.Options.Types.comma_list, + help="RPM erase flags"), + Bcfg2.Options.BooleanOption( + cf=('RPM', 'fix_version'), default=True, + dest="rpm_fix_version", + help="Fix (upgrade or downgrade) packages with the wrong version"), + Bcfg2.Options.BooleanOption( + cf=('RPM', 'reinstall_broken'), default=True, + dest="rpm_reinstall_broken", + help="Reinstall packages that fail to verify"), + Bcfg2.Options.Option( + cf=('RPM', 'verify_flags'), default=[], + dest="rpm_verify_flags", type=Bcfg2.Options.Types.comma_list, + help="RPM verify flags")] + __execs__ = ['/bin/rpm', '/var/lib/rpm'] __handles__ = [('Package', 'rpm')] @@ -15,7 +1143,7 @@ __new_req__ = {'Package': 
['name'], 'Instance': ['version', 'release', 'arch']} - __new_ireq__ = {'Package': ['uri'], \ + __new_ireq__ = {'Package': ['uri'], 'Instance': ['simplefile']} __gpg_req__ = {'Package': ['name', 'version']} @@ -26,60 +1154,51 @@ __new_gpg_ireq__ = {'Package': ['name'], 'Instance': ['version', 'release']} - conflicts = ['RPMng'] - pkgtype = 'rpm' pkgtool = ("rpm --oldpackage --replacepkgs --quiet -U %s", ("%s", ["url"])) - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + def __init__(self, config): + Bcfg2.Client.Tools.PkgTool.__init__(self, config) # create a global ignore list used when ignoring particular # files during package verification - self.ignores = [entry.get('name') for struct in config for entry in struct \ - if entry.get('type') == 'ignore'] + self.ignores = [entry.get('name') for struct in config + for entry in struct if entry.get('type') == 'ignore'] self.instance_status = {} self.extra_instances = [] self.modlists = {} self.gpg_keyids = self.getinstalledgpg() - opt_prefix = self.name.lower() - self.installOnlyPkgs = self.setup["%s_installonly" % opt_prefix] + self.installOnlyPkgs = Bcfg2.Options.setup.rpm_installonly if 'gpg-pubkey' not in self.installOnlyPkgs: self.installOnlyPkgs.append('gpg-pubkey') - self.erase_flags = self.setup['%s_erase_flags' % opt_prefix] - self.pkg_checks = self.setup['%s_pkg_checks' % opt_prefix] - self.pkg_verify = self.setup['%s_pkg_verify' % opt_prefix] - self.installed_action = self.setup['%s_installed_action' % opt_prefix] - self.version_fail_action = self.setup['%s_version_fail_action' % - opt_prefix] - self.verify_fail_action = self.setup['%s_verify_fail_action' % - opt_prefix] - self.verify_flags = self.setup['%s_verify_flags' % opt_prefix] + self.verify_flags = Bcfg2.Options.setup.rpm_verify_flags if '' in self.verify_flags: self.verify_flags.remove('') self.logger.debug('%s: installOnlyPackages = %s' % (self.name, self.installOnlyPkgs)) self.logger.debug('%s: erase_flags = %s' % - (self.name, self.erase_flags)) + (self.name, Bcfg2.Options.setup.rpm_erase_flags)) self.logger.debug('%s: pkg_checks = %s' % - (self.name, self.pkg_checks)) + (self.name, Bcfg2.Options.setup.rpm_pkg_checks)) self.logger.debug('%s: pkg_verify = %s' % - (self.name, self.pkg_verify)) - self.logger.debug('%s: installed_action = %s' % - (self.name, self.installed_action)) - self.logger.debug('%s: version_fail_action = %s' % - (self.name, self.version_fail_action)) - self.logger.debug('%s: verify_fail_action = %s' % - (self.name, self.verify_fail_action)) + (self.name, Bcfg2.Options.setup.rpm_pkg_verify)) + self.logger.debug('%s: install_missing = %s' % + (self.name, Bcfg2.Options.setup.rpm_install_missing)) + self.logger.debug('%s: fix_version = %s' % + (self.name, Bcfg2.Options.setup.rpm_fix_version)) + self.logger.debug('%s: reinstall_broken = %s' % + (self.name, + Bcfg2.Options.setup.rpm_reinstall_broken)) self.logger.debug('%s: verify_flags = %s' % (self.name, self.verify_flags)) # Force a re- prelink of all packages if prelink exists. # Many, if not most package verifies can be caused by out of # date prelinking. 
- if os.path.isfile('/usr/sbin/prelink') and not self.setup['dryrun']: + if (os.path.isfile('/usr/sbin/prelink') and + not Bcfg2.Options.setup.dry_run): rv = self.cmd.run('/usr/sbin/prelink -a -mR') if rv.success: self.logger.debug('Pre-emptive prelink succeeded') @@ -104,18 +1223,18 @@ 'arch':'x86_64'} ] """ self.installed = {} - refresh_ts = rpmtools.rpmtransactionset() + refresh_ts = rpmtransactionset() # Don't bother with signature checks at this stage. The GPG keys might # not be installed. - refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES) - for nevra in rpmtools.rpmpackagelist(refresh_ts): + refresh_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES) + for nevra in rpmpackagelist(refresh_ts): self.installed.setdefault(nevra['name'], []).append(nevra) - if self.setup['debug']: + if Bcfg2.Options.setup.debug: print("The following package instances are installed:") for name, instances in list(self.installed.items()): self.logger.debug(" " + name) for inst in instances: - self.logger.debug(" %s" %self.str_evra(inst)) + self.logger.debug(" %s" % self.str_evra(inst)) refresh_ts.closeDB() del refresh_ts @@ -145,18 +1264,19 @@ Constructs the text prompts for interactive mode. """ - instances = [inst for inst in entry if inst.tag == 'Instance' or inst.tag == 'Package'] + instances = [inst for inst in entry if inst.tag == 'Instance' or + inst.tag == 'Package'] if instances == []: # We have an old style no Instance entry. Convert it to new style. instance = Bcfg2.Client.XML.SubElement(entry, 'Package') for attrib in list(entry.attrib.keys()): instance.attrib[attrib] = entry.attrib[attrib] - if (self.pkg_checks and - entry.get('pkg_checks', 'true').lower() == 'true'): + if (Bcfg2.Options.setup.rpm_pkg_checks and + entry.get('pkg_checks', 'true').lower() == 'true'): if 'any' in [entry.get('version'), pinned_version]: version, release = 'any', 'any' elif entry.get('version') == 'auto': - if pinned_version != None: + if pinned_version is not None: version, release = pinned_version.split('-') else: return False @@ -166,242 +1286,315 @@ instance.set('release', release) if entry.get('verify', 'true') == 'false': instance.set('verify', 'false') - instances = [ instance ] + instances = [instance] - self.logger.debug("Verifying package instances for %s" % entry.get('name')) + self.logger.debug("Verifying package instances for %s" % + entry.get('name')) package_fail = False qtext_versions = '' if entry.get('name') in self.installed: # There is at least one instance installed. - if (self.pkg_checks and - entry.get('pkg_checks', 'true').lower() == 'true'): + if (Bcfg2.Options.setup.rpm_pkg_checks and + entry.get('pkg_checks', 'true').lower() == 'true'): rpmTs = rpm.TransactionSet() rpmHeader = None for h in rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')): - if rpmHeader is None or rpm.versionCompare(h, rpmHeader) > 0: + if rpmHeader is None or \ + rpm.versionCompare(h, rpmHeader) > 0: rpmHeader = h - rpmProvides = [ h['provides'] for h in \ - rpmTs.dbMatch(rpm.RPMTAG_NAME, entry.get('name')) ] + rpmProvides = [h['provides'] for h in + rpmTs.dbMatch(rpm.RPMTAG_NAME, + entry.get('name'))] rpmIntersection = set(rpmHeader['provides']) & \ - set(self.installOnlyPkgs) + set(self.installOnlyPkgs) if len(rpmIntersection) > 0: # Packages that should only be installed or removed. # e.g. kernels. 
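
Stepping back from the code for a moment: the rpm_* options read in __init__() above (installonlypackages, pkg_checks, pkg_verify, install_missing, erase_flags, fix_version, reinstall_broken, verify_flags) all declare cf=('RPM', ...), so they appear to be settable from an [RPM] section of bcfg2.conf. A hedged illustration, spelling out the defaults taken from the option definitions in this patch (the exact boolean spelling accepted may vary with the Options framework):

    [RPM]
    installonlypackages = kernel,kernel-bigmem,kernel-enterprise,kernel-smp,kernel-modules,kernel-debug,kernel-unsupported,kernel-devel,kernel-source,kernel-default,kernel-largesmp-devel,kernel-largesmp,kernel-xen,gpg-pubkey
    pkg_checks = true
    pkg_verify = true
    install_missing = true
    erase_flags = allmatches
    fix_version = true
    reinstall_broken = true
    verify_flags =

Note that __init__() re-adds gpg-pubkey to the install-only list even if it is omitted here.
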
self.logger.debug(" Install only package.") for inst in instances: - self.instance_status.setdefault(inst, {})['installed'] = False + self.instance_status.setdefault(inst, {})['installed']\ + = False self.instance_status[inst]['version_fail'] = False - if inst.tag == 'Package' and len(self.installed[entry.get('name')]) > 1: - self.logger.error("WARNING: Multiple instances of package %s are installed." % \ + if inst.tag == 'Package' and \ + len(self.installed[entry.get('name')]) > 1: + self.logger.error("WARNING: Multiple instances of " + "package %s are installed." % (entry.get('name'))) for pkg in self.installed[entry.get('name')]: - if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) \ - or self.inst_evra_equal(inst, pkg): + if inst.get('version') == 'any' or \ + self.pkg_vr_equal(inst, pkg) or \ + self.inst_evra_equal(inst, pkg): if inst.get('version') == 'any': self.logger.error("got any version") - self.logger.debug(" %s" % self.str_evra(inst)) + self.logger.debug(" %s" % + self.str_evra(inst)) self.instance_status[inst]['installed'] = True - if (self.pkg_verify and - inst.get('pkg_verify', 'true').lower() == 'true'): - flags = inst.get('verify_flags', '').split(',') + self.verify_flags + if (Bcfg2.Options.setup.rpm_pkg_verify and + inst.get('pkg_verify', + 'true').lower() == 'true'): + flags = inst.get('verify_flags', + '').split(',') + \ + self.verify_flags if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ entry.get('name') != 'gpg-pubkey': flags += ['nosignature', 'nodigest'] - self.logger.debug('WARNING: Package %s %s requires GPG Public key with ID %s'\ - % (pkg.get('name'), self.str_evra(pkg), \ - pkg.get('gpgkeyid', ''))) - self.logger.debug(' Disabling signature check.') + self.logger.debug('WARNING: Package ' + '%s %s requires GPG ' + 'Public key with ID ' + '%s' % + (pkg.get('name'), + self.str_evra(pkg), + pkg.get('gpgkeyid', + ''))) + self.logger.debug(' Disabling ' + 'signature check.') - if self.setup.get('quick', False): - if rpmtools.prelink_exists: + if Bcfg2.Options.setup.quick: + if prelink_exists: flags += ['nomd5', 'nosize'] else: flags += ['nomd5'] - self.logger.debug(" verify_flags = %s" % flags) + self.logger.debug(" verify_flags = " + "%s" % flags) if inst.get('verify', 'true') == 'false': - self.instance_status[inst]['verify'] = None + self.instance_status[inst]['verify'] =\ + None else: - vp_ts = rpmtools.rpmtransactionset() - self.instance_status[inst]['verify'] = \ - rpmtools.rpm_verify( vp_ts, pkg, flags) + vp_ts = rpmtransactionset() + self.instance_status[inst]['verify'] =\ + rpm_verify(vp_ts, pkg, flags) vp_ts.closeDB() del vp_ts - if self.instance_status[inst]['installed'] == False: - self.logger.info(" Package %s %s not installed." % \ - (entry.get('name'), self.str_evra(inst))) + if not self.instance_status[inst]['installed']: + self.logger.info(" Package %s %s not " + "installed." % + (entry.get('name'), + self.str_evra(inst))) - qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst) + qtext_versions = qtext_versions + 'I(%s) ' % \ + self.str_evra(inst) entry.set('current_exists', 'false') else: # Normal Packages that can be upgraded. for inst in instances: - self.instance_status.setdefault(inst, {})['installed'] = False + self.instance_status.setdefault(inst, {})['installed']\ + = False self.instance_status[inst]['version_fail'] = False - # Only installed packages with the same architecture are - # relevant. 
- if inst.get('arch', None) == None: + # only installed packages with the same architecture + # are relevant. + if inst.get('arch', None) is None: arch_match = self.installed[entry.get('name')] else: - arch_match = [pkg for pkg in self.installed[entry.get('name')] \ - if pkg.get('arch', None) == inst.get('arch', None)] + arch_match = [pkg for pkg in + self.installed[entry.get('name')] + if pkg.get('arch', None) == + inst.get('arch', None)] if len(arch_match) > 1: - self.logger.error("Multiple instances of package %s installed with the same achitecture." % \ - (entry.get('name'))) + self.logger.error("Multiple instances of package " + "%s installed with the same " + "achitecture." % + (entry.get('name'))) elif len(arch_match) == 1: # There is only one installed like there should be. # Check that it is the right version. for pkg in arch_match: - if inst.get('version') == 'any' or self.pkg_vr_equal(inst, pkg) or \ - self.inst_evra_equal(inst, pkg): - self.logger.debug(" %s" % self.str_evra(inst)) - self.instance_status[inst]['installed'] = True - - if (self.pkg_verify and - inst.get('pkg_verify', 'true').lower() == 'true'): - flags = inst.get('verify_flags', '').split(',') + self.verify_flags - if pkg.get('gpgkeyid', '')[-8:] not in self.gpg_keyids and \ - 'nosignature' not in flags: - flags += ['nosignature', 'nodigest'] - self.logger.info('WARNING: Package %s %s requires GPG Public key with ID %s'\ - % (pkg.get('name'), self.str_evra(pkg), \ - pkg.get('gpgkeyid', ''))) - self.logger.info(' Disabling signature check.') + if inst.get('version') == 'any' or \ + self.pkg_vr_equal(inst, pkg) or \ + self.inst_evra_equal(inst, pkg): + self.logger.debug(" %s" % + self.str_evra(inst)) + self.instance_status[inst]['installed'] = \ + True + + if (Bcfg2.Options.setup.rpm_pkg_verify and + inst.get( + 'pkg_verify', + 'true').lower() == 'true'): + flags = inst.get('verify_flags', + '').split(',') + \ + self.verify_flags + if pkg.get('gpgkeyid', '')[-8:] not in\ + self.gpg_keyids and 'nosignature'\ + not in flags: + flags += ['nosignature', + 'nodigest'] + self.logger.info( + 'WARNING: Package %s %s ' + 'requires GPG Public key with ' + 'ID %s' % (pkg.get('name'), + self.str_evra(pkg), + pkg.get('gpgkeyid', + ''))) + self.logger.info( + ' Disabling signature ' + 'check.') - if self.setup.get('quick', False): - if rpmtools.prelink_exists: + if Bcfg2.Options.setup.quick: + if prelink_exists: flags += ['nomd5', 'nosize'] else: flags += ['nomd5'] - self.logger.debug(" verify_flags = %s" % flags) + self.logger.debug( + " verify_flags = %s" % + flags) - if inst.get('verify', 'true') == 'false': + if inst.get('verify', 'true') == \ + 'false': self.instance_status[inst]['verify'] = None else: - vp_ts = rpmtools.rpmtransactionset() - self.instance_status[inst]['verify'] = \ - rpmtools.rpm_verify( vp_ts, pkg, flags ) + vp_ts = rpmtransactionset() + self.instance_status[inst]['verify'] = rpm_verify(vp_ts, pkg, flags) vp_ts.closeDB() del vp_ts else: # Wrong version installed. - self.instance_status[inst]['version_fail'] = True - self.logger.info(" Wrong version installed. Want %s, but have %s"\ - % (self.str_evra(inst), self.str_evra(pkg))) - - qtext_versions = qtext_versions + 'U(%s -> %s) ' % \ - (self.str_evra(pkg), self.str_evra(inst)) + self.instance_status[inst]['version_fail']\ + = True + self.logger.info(" Wrong version " + "installed. 
Want %s, but " + "have %s" % + (self.str_evra(inst), + self.str_evra(pkg))) + + qtext_versions = qtext_versions + \ + 'U(%s -> %s) ' % (self.str_evra(pkg), + self.str_evra(inst)) elif len(arch_match) == 0: # This instance is not installed. self.instance_status[inst]['installed'] = False - self.logger.info(" %s is not installed." % self.str_evra(inst)) - qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst) + self.logger.info(" %s is not installed." % + self.str_evra(inst)) + qtext_versions = qtext_versions + \ + 'I(%s) ' % self.str_evra(inst) # Check the rpm verify results. for inst in instances: instance_fail = False # Dump the rpm verify results. #****Write something to format this nicely.***** - if self.setup['debug'] and self.instance_status[inst].get('verify', None): + if (Bcfg2.Options.setup.debug and + self.instance_status[inst].get('verify', None)): self.logger.debug(self.instance_status[inst]['verify']) self.instance_status[inst]['verify_fail'] = False if self.instance_status[inst].get('verify', None): if len(self.instance_status[inst].get('verify')) > 1: - self.logger.info("WARNING: Verification of more than one package instance.") + self.logger.info("WARNING: Verification of more " + "than one package instance.") for result in self.instance_status[inst]['verify']: # Check header results if result.get('hdr', None): instance_fail = True - self.instance_status[inst]['verify_fail'] = True + self.instance_status[inst]['verify_fail'] = \ + True # Check dependency results if result.get('deps', None): instance_fail = True - self.instance_status[inst]['verify_fail'] = True + self.instance_status[inst]['verify_fail'] = \ + True - # Check the rpm verify file results against the modlist - # and entry and per Instance Ignores. - ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \ - [ig.get('name') for ig in inst.findall('Ignore')] + \ - self.ignores + # check the rpm verify file results against + # the modlist and entry and per Instance Ignores. + ignores = [ig.get('name') + for ig in entry.findall('Ignore')] + \ + [ig.get('name') + for ig in inst.findall('Ignore')] + \ + self.ignores for file_result in result.get('files', []): if file_result[-1] not in modlist + ignores: instance_fail = True - self.instance_status[inst]['verify_fail'] = True + self.instance_status[inst]['verify_fail'] \ + = True else: - self.logger.debug(" Modlist/Ignore match: %s" % \ - (file_result[-1])) - - if instance_fail == True: - self.logger.debug("*** Instance %s failed RPM verification ***" % \ + self.logger.debug(" Modlist/Ignore " + "match: %s" % + (file_result[-1])) + + if instance_fail: + self.logger.debug("*** Instance %s failed RPM " + "verification ***" % self.str_evra(inst)) - qtext_versions = qtext_versions + 'R(%s) ' % self.str_evra(inst) + qtext_versions = qtext_versions + \ + 'R(%s) ' % self.str_evra(inst) self.modlists[entry] = modlist - # Attach status structure for return to server for reporting. - inst.set('verify_status', str(self.instance_status[inst])) - - if self.instance_status[inst]['installed'] == False or \ - self.instance_status[inst].get('version_fail', False)== True or \ - self.instance_status[inst].get('verify_fail', False) == True: + # Attach status structure for reporting. 
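
A short aside on the flag handling used in both verify branches above: per-Instance verify_flags are split on commas and appended to the client-wide rpm_verify_flags, nosignature/nodigest are added when the signing key is not installed, and nomd5 (plus nosize when prelink is present) when the client runs in quick mode. Condensed into a standalone, illustrative and slightly simplified sketch (not part of the patch; the parameter names are stand-ins):

    # Illustrative restatement of the verify-flag assembly above.
    def assemble_verify_flags(instance_flags, global_flags,
                              gpg_key_known, quick, prelink_available):
        flags = [f for f in instance_flags.split(',') if f] + list(global_flags)
        if not gpg_key_known:
            # signature checks are pointless without the signing key installed
            flags += ['nosignature', 'nodigest']
        if quick:
            # prelinked files defeat size checks, so skip those too
            flags += ['nomd5', 'nosize'] if prelink_available else ['nomd5']
        return flags

For example, assemble_verify_flags('nomtime', ['nouser'], True, False, True) yields ['nomtime', 'nouser'].
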
+ inst.set('verify_status', + str(self.instance_status[inst])) + + version_fail = self.instance_status[inst].get( + 'version_fail', False) + verify_fail = self.instance_status[inst].get( + 'verify_fail', False) + if not self.instance_status[inst]['installed'] or \ + version_fail or verify_fail: package_fail = True self.instance_status[inst]['pkg'] = entry self.modlists[entry] = modlist # Find Installed Instances that are not in the Config. - extra_installed = self.FindExtraInstances(entry, self.installed[entry.get('name')]) - if extra_installed != None: + extra_installed = self.FindExtraInstances( + entry, self.installed[entry.get('name')]) + if extra_installed is not None: package_fail = True self.extra_instances.append(extra_installed) for inst in extra_installed.findall('Instance'): - qtext_versions = qtext_versions + 'D(%s) ' % self.str_evra(inst) - self.logger.debug("Found Extra Instances %s" % qtext_versions) - - if package_fail == True: - self.logger.info(" Package %s failed verification." % \ - (entry.get('name'))) - qtext = 'Install/Upgrade/delete Package %s instance(s) - %s (y/N) ' % \ - (entry.get('name'), qtext_versions) + qtext_versions = qtext_versions + \ + 'D(%s) ' % self.str_evra(inst) + self.logger.debug("Found Extra Instances %s" % + qtext_versions) + + if package_fail: + self.logger.info(" Package %s failed verification." + % (entry.get('name'))) + qtext = 'Install/Upgrade/delete Package %s instance(s) - '\ + '%s (y/N) ' % (entry.get('name'), qtext_versions) entry.set('qtext', qtext) bcfg2_versions = '' - for bcfg2_inst in [inst for inst in instances if inst.tag == 'Instance']: - bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(bcfg2_inst) + for bcfg2_inst in [inst for inst in instances + if inst.tag == 'Instance']: + bcfg2_versions = bcfg2_versions + \ + '(%s) ' % self.str_evra(bcfg2_inst) if bcfg2_versions != '': entry.set('version', bcfg2_versions) installed_versions = '' for installed_inst in self.installed[entry.get('name')]: - installed_versions = installed_versions + '(%s) ' % \ - self.str_evra(installed_inst) + installed_versions = installed_versions + \ + '(%s) ' % self.str_evra(installed_inst) entry.set('current_version', installed_versions) return False else: # There are no Instances of this package installed. - self.logger.debug("Package %s has no instances installed" % (entry.get('name'))) + self.logger.debug("Package %s has no instances installed" % + (entry.get('name'))) entry.set('current_exists', 'false') bcfg2_versions = '' for inst in instances: - qtext_versions = qtext_versions + 'I(%s) ' % self.str_evra(inst) + qtext_versions = qtext_versions + \ + 'I(%s) ' % self.str_evra(inst) self.instance_status.setdefault(inst, {})['installed'] = False self.modlists[entry] = modlist self.instance_status[inst]['pkg'] = entry if inst.tag == 'Instance': - bcfg2_versions = bcfg2_versions + '(%s) ' % self.str_evra(inst) + bcfg2_versions = bcfg2_versions + \ + '(%s) ' % self.str_evra(inst) if bcfg2_versions != '': entry.set('version', bcfg2_versions) - entry.set('qtext', "Install Package %s Instance(s) %s? (y/N) " % \ + entry.set('qtext', "Install Package %s Instance(s) %s? 
(y/N) " % (entry.get('name'), qtext_versions)) return False @@ -421,26 +1614,31 @@ for pkg in packages: for inst in pkg: if pkg.get('name') != 'gpg-pubkey': - pkgspec = { 'name':pkg.get('name'), - 'epoch':inst.get('epoch', None), - 'version':inst.get('version'), - 'release':inst.get('release'), - 'arch':inst.get('arch') } + pkgspec = {'name': pkg.get('name'), + 'epoch': inst.get('epoch', None), + 'version': inst.get('version'), + 'release': inst.get('release'), + 'arch': inst.get('arch')} pkgspec_list.append(pkgspec) else: - pkgspec = { 'name':pkg.get('name'), - 'version':inst.get('version'), - 'release':inst.get('release')} - self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ - % (pkgspec.get('name'), self.str_evra(pkgspec))) - self.logger.info(" This package will be deleted in a future version of the RPM driver.") + pkgspec = {'name': pkg.get('name'), + 'version': inst.get('version'), + 'release': inst.get('release')} + self.logger.info("WARNING: gpg-pubkey package not in " + "configuration %s %s" % + (pkgspec.get('name'), + self.str_evra(pkgspec))) + self.logger.info(" This package will be deleted " + "in a future version of the RPM driver.") #pkgspec_list.append(pkg_spec) - erase_results = rpmtools.rpm_erase(pkgspec_list, self.erase_flags) + erase_results = rpm_erase(pkgspec_list, + Bcfg2.Options.setup.rpm_erase_flags) if erase_results == []: self.modified += packages for pkg in pkgspec_list: - self.logger.info("Deleted %s %s" % (pkg.get('name'), self.str_evra(pkg))) + self.logger.info("Deleted %s %s" % (pkg.get('name'), + self.str_evra(pkg))) else: self.logger.info("Bulk erase failed with errors:") self.logger.debug("Erase results = %s" % erase_results) @@ -450,30 +1648,38 @@ pkg_modified = False for inst in pkg: if pkg.get('name') != 'gpg-pubkey': - pkgspec = { 'name':pkg.get('name'), - 'epoch':inst.get('epoch', None), - 'version':inst.get('version'), - 'release':inst.get('release'), - 'arch':inst.get('arch') } + pkgspec = {'name': pkg.get('name'), + 'epoch': inst.get('epoch', None), + 'version': inst.get('version'), + 'release': inst.get('release'), + 'arch': inst.get('arch')} pkgspec_list.append(pkgspec) else: - pkgspec = { 'name':pkg.get('name'), - 'version':inst.get('version'), - 'release':inst.get('release')} - self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ - % (pkgspec.get('name'), self.str_evra(pkgspec))) - self.logger.info(" This package will be deleted in a future version of the RPM driver.") - continue # Don't delete the gpg-pubkey packages for now. 
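
Both removal paths above hand plain pkgspec dictionaries to the module-level rpm_erase() together with the configured erase flags. An illustrative call (not part of the patch; the package values are invented) showing the shapes involved:

    # Hypothetical values; the dict keys mirror the ones built in Remove().
    pkgspec = {'name': 'oldpkg',
               'epoch': None,
               'version': '1.0',
               'release': '3',
               'arch': 'x86_64'}
    problems = rpm_erase([pkgspec], ['allmatches'])
    if problems == []:
        print("erased %s" % pkgspec['name'])
    else:
        print("erase failed: %s" % (problems,))

As in Remove(), an empty problem list is treated as success and anything else is logged as a failure.
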
- erase_results = rpmtools.rpm_erase([pkgspec], self.erase_flags) + pkgspec = {'name': pkg.get('name'), + 'version': inst.get('version'), + 'release': inst.get('release')} + self.logger.info("WARNING: gpg-pubkey package not in " + "configuration %s %s" % + (pkgspec.get('name'), + self.str_evra(pkgspec))) + self.logger.info(" This package will be " + "deleted in a future version of the " + "RPM driver.") + continue # don't delete the gpg-pubkey packages + erase_results = rpm_erase( + [pkgspec], + Bcfg2.Options.setup.rpm_erase_flags) if erase_results == []: pkg_modified = True - self.logger.info("Deleted %s %s" % \ - (pkgspec.get('name'), self.str_evra(pkgspec))) + self.logger.info("Deleted %s %s" % + (pkgspec.get('name'), + self.str_evra(pkgspec))) else: - self.logger.error("unable to delete %s %s" % \ - (pkgspec.get('name'), self.str_evra(pkgspec))) + self.logger.error("unable to delete %s %s" % + (pkgspec.get('name'), + self.str_evra(pkgspec))) self.logger.debug("Failure = %s" % erase_results) - if pkg_modified == True: + if pkg_modified: self.modified.append(pkg) self.RefreshPackages() @@ -489,33 +1695,35 @@ """ fix = False - if inst_status.get('installed', False) == False: - if instance.get('installed_action', 'install') == "install" and \ - self.installed_action == "install": + if not inst_status.get('installed', False): + if (instance.get('install_missing', 'true').lower() == "true" and + Bcfg2.Options.setup.rpm_install_missing): fix = True else: - self.logger.debug('Installed Action for %s %s is to not install' % \ + self.logger.debug('Installed Action for %s %s is to not ' + 'install' % (inst_status.get('pkg').get('name'), self.str_evra(instance))) - elif inst_status.get('version_fail', False) == True: - if instance.get('version_fail_action', 'upgrade') == "upgrade" and \ - self.version_fail_action == "upgrade": + elif inst_status.get('version_fail', False): + if (instance.get('fix_version', 'true').lower() == "true" and + Bcfg2.Options.setup.rpm_fix_version): fix = True else: - self.logger.debug('Version Fail Action for %s %s is to not upgrade' % \ + self.logger.debug('Version Fail Action for %s %s is to ' + 'not upgrade' % (inst_status.get('pkg').get('name'), self.str_evra(instance))) - elif inst_status.get('verify_fail', False) == True and self.name == "RPM": - # yum can't reinstall packages so only do this for rpm. - if instance.get('verify_fail_action', 'reinstall') == "reinstall" and \ - self.verify_fail_action == "reinstall": + elif inst_status.get('verify_fail', False): + if (instance.get('reinstall_broken', 'true').lower() == "true" and + Bcfg2.Options.setup.rpm_reinstall_broken): for inst in inst_status.get('verify'): # This needs to be a for loop rather than a straight get() # because the underlying routines handle multiple packages # and return a list of results. 
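
The FixInstance() logic in this hunk gates every repair on two switches that must both allow it: the per-Instance attribute in the literal configuration (install_missing, fix_version, reinstall_broken, each defaulting to 'true') and the corresponding client-wide option. A condensed, illustrative restatement (not part of the patch; plain dicts stand in for the real instance status, Instance attributes and Bcfg2.Options.setup):

    # Illustrative restatement of the FixInstance() gating above.
    def should_fix(status, instance_attrs, opts):
        if not status.get('installed', False):
            return (instance_attrs.get('install_missing', 'true') == 'true'
                    and opts['rpm_install_missing'])
        if status.get('version_fail', False):
            return (instance_attrs.get('fix_version', 'true') == 'true'
                    and opts['rpm_fix_version'])
        if status.get('verify_fail', False):
            return (instance_attrs.get('reinstall_broken', 'true') == 'true'
                    and opts['rpm_reinstall_broken'])
        return False

The real method additionally inspects the stored verify results (header, file and dependency failures) before deciding that a reinstall is worthwhile.
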
- self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % inst.get('nevra')) + self.logger.debug('reinstall_check: %s %s:%s-%s.%s' % + inst.get('nevra')) if inst.get("hdr", False): fix = True @@ -523,7 +1731,8 @@ elif inst.get('files', False): # Parse rpm verify file results for file_result in inst.get('files', []): - self.logger.debug('reinstall_check: file: %s' % file_result) + self.logger.debug('reinstall_check: file: %s' % + file_result) if file_result[-2] != 'c': fix = True break @@ -532,13 +1741,14 @@ elif inst.get("deps", False): fix = False else: - self.logger.debug('Verify Fail Action for %s %s is to not reinstall' % \ - (inst_status.get('pkg').get('name'), - self.str_evra(instance))) + self.logger.debug('Verify Fail Action for %s %s is to not ' + 'reinstall' % + (inst_status.get('pkg').get('name'), + self.str_evra(instance))) return fix - def Install(self, packages, states): + def Install(self, packages): """ Try and fix everything that RPM.VerifyPackages() found wrong for each Package Entry. This can result in individual RPMs being @@ -559,6 +1769,7 @@ """ self.logger.info('Runing RPM.Install()') + states = dict() install_only_pkgs = [] gpg_keys = [] upgrade_pkgs = [] @@ -566,20 +1777,21 @@ # Remove extra instances. # Can not reverify because we don't have a package entry. if len(self.extra_instances) > 0: - if (self.setup.get('remove') == 'all' or \ - self.setup.get('remove') == 'packages') and\ - not self.setup.get('dryrun'): + if (Bcfg2.Options.setup.remove in ['all', 'packages'] and + not Bcfg2.Options.setup.dry_run): self.Remove(self.extra_instances) else: - self.logger.info("The following extra package instances will be removed by the '-r' option:") + self.logger.info("The following extra package instances will " + "be removed by the '-r' option:") for pkg in self.extra_instances: for inst in pkg: - self.logger.info(" %s %s" % (pkg.get('name'), self.str_evra(inst))) + self.logger.info(" %s %s" % (pkg.get('name'), + self.str_evra(inst))) # Figure out which instances of the packages actually need something # doing to them and place in the appropriate work 'queue'. for pkg in packages: - for inst in [instn for instn in pkg if instn.tag \ + for inst in [instn for instn in pkg if instn.tag in ['Instance', 'Package']]: if self.FixInstance(inst, self.instance_status[inst]): if pkg.get('name') == 'gpg-pubkey': @@ -592,10 +1804,10 @@ # Fix installOnlyPackages if len(install_only_pkgs) > 0: self.logger.info("Attempting to install 'install only packages'") - install_args = \ - " ".join(os.path.join(self.instance_status[inst].get('pkg').get('uri'), - inst.get('simplefile')) - for inst in install_only_pkgs) + install_args = " ".join(os.path.join( + self.instance_status[inst].get('pkg').get('uri'), + inst.get('simplefile')) + for inst in install_only_pkgs) if self.cmd.run("rpm --install --quiet --oldpackage --replacepkgs " "%s" % install_args): # The rpm command succeeded. All packages installed. 
@@ -607,35 +1819,34 @@ self.logger.error("Single Pass for InstallOnlyPackages Failed") installed_instances = [] for inst in install_only_pkgs: - install_args = \ - os.path.join(self.instance_status[inst].get('pkg').get('uri'), - inst.get('simplefile')) + pkguri = self.instance_status[inst].get('pkg').get('uri') + pkgname = self.instance_status[inst].get('pkg').get('name') + install_args = os.path.join(pkguri, inst.get('simplefile')) if self.cmd.run("rpm --install --quiet --oldpackage " "--replacepkgs %s" % install_args): installed_instances.append(inst) else: - self.logger.debug("InstallOnlyPackage %s %s would not install." % \ - (self.instance_status[inst].get('pkg').get('name'), \ - self.str_evra(inst))) + self.logger.debug("InstallOnlyPackage %s %s would not " + "install." % (pkgname, + self.str_evra(inst))) - install_pkg_set = set([self.instance_status[inst].get('pkg') \ - for inst in install_only_pkgs]) + install_pkg_set = set([self.instance_status[inst].get('pkg') + for inst in install_only_pkgs]) self.RefreshPackages() # Install GPG keys. if len(gpg_keys) > 0: for inst in gpg_keys: self.logger.info("Installing GPG keys.") - key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ - inst.get('simplefile')) + pkguri = self.instance_status[inst].get('pkg').get('uri') + pkgname = self.instance_status[inst].get('pkg').get('name') + key_arg = os.path.join(pkguri, inst.get('simplefile')) if not self.cmd.run("rpm --import %s" % key_arg): self.logger.debug("Unable to install %s-%s" % - (self.instance_status[inst].get('pkg').get('name'), - self.str_evra(inst))) + (pkgname, self.str_evra(inst))) else: self.logger.debug("Installed %s-%s-%s" % - (self.instance_status[inst].get('pkg').get('name'), - inst.get('version'), + (pkgname, inst.get('version'), inst.get('release'))) self.RefreshPackages() self.gpg_keyids = self.getinstalledgpg() @@ -645,9 +1856,10 @@ # Fix upgradeable packages. if len(upgrade_pkgs) > 0: self.logger.info("Attempting to upgrade packages") - upgrade_args = " ".join([os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ - inst.get('simplefile')) \ - for inst in upgrade_pkgs]) + upgrade_args = " ".join([os.path.join( + self.instance_status[inst].get('pkg').get('uri'), + inst.get('simplefile')) + for inst in upgrade_pkgs]) if self.cmd.run("rpm --upgrade --quiet --oldpackage --replacepkgs " "%s" % upgrade_args): # The rpm command succeeded. All packages upgraded. @@ -661,30 +1873,38 @@ self.logger.error("Single Pass for Upgrading Packages Failed") upgraded_instances = [] for inst in upgrade_pkgs: - upgrade_args = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ - inst.get('simplefile')) - #self.logger.debug("rpm --upgrade --quiet --oldpackage --replacepkgs %s" % \ - # upgrade_args) + upgrade_args = os.path.join( + self.instance_status[inst].get('pkg').get('uri'), + inst.get('simplefile')) + #self.logger.debug("rpm --upgrade --quiet --oldpackage " + # "--replacepkgs %s" % upgrade_args) if self.cmd.run("rpm --upgrade --quiet --oldpackage " "--replacepkgs %s" % upgrade_args): upgraded_instances.append(inst) else: - self.logger.debug("Package %s %s would not upgrade." % - (self.instance_status[inst].get('pkg').get('name'), - self.str_evra(inst))) + self.logger.debug( + "Package %s %s would not upgrade." 
% + (self.instance_status[inst].get('pkg').get('name'), + self.str_evra(inst))) - upgrade_pkg_set = set([self.instance_status[inst].get('pkg') \ - for inst in upgrade_pkgs]) + upgrade_pkg_set = set([self.instance_status[inst].get('pkg') + for inst in upgrade_pkgs]) self.RefreshPackages() - if not self.setup['kevlar']: + if not Bcfg2.Options.setup.kevlar: for pkg_entry in packages: - self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name'))) - states[pkg_entry] = self.VerifyPackage(pkg_entry, \ - self.modlists.get(pkg_entry, [])) - - for entry in [ent for ent in packages if states[ent]]: - self.modified.append(entry) + self.logger.debug("Reverifying Failed Package %s" % + (pkg_entry.get('name'))) + states[pkg_entry] = self.VerifyPackage( + pkg_entry, self.modlists.get(pkg_entry, [])) + + self.modified.extend(ent for ent in packages if states[ent]) + return states + + def _log_incomplete_entry_install(self, etag, ename): + self.logger.error("Incomplete information for entry %s:%s; " + "cannot install" % (etag, ename)) + return def canInstall(self, entry): """Test if entry has enough information to be installed.""" @@ -692,18 +1912,17 @@ return False if 'failure' in entry.attrib: - self.logger.error("Cannot install entry %s:%s with bind failure" % \ + self.logger.error("Cannot install entry %s:%s with bind failure" % (entry.tag, entry.get('name'))) return False - instances = entry.findall('Instance') - # If the entry wasn't verifiable, then we really don't want to try and fix something - # that we don't know is broken. + # If the entry wasn't verifiable, then we really don't want to try + # and fix something that we don't know is broken. if not self.canVerify(entry): - self.logger.debug("WARNING: Package %s was not verifiable, not passing to Install()" \ - % entry.get('name')) + self.logger.debug("WARNING: Package %s was not verifiable, not " + "passing to Install()" % entry.get('name')) return False if not instances: @@ -711,53 +1930,70 @@ if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really pacakges, so we have to do # something a little different. - # Check that the Package Level has what we need for verification. - if [attr for attr in self.__gpg_ireq__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot install" \ - % (entry.tag, entry.get('name'))) + # check that the Package level has + # what we need for verification. + if [attr for attr in self.__gpg_ireq__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_install(entry.tag, + entry.get('name')) return False else: - if [attr for attr in self.__ireq__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot install" \ - % (entry.tag, entry.get('name'))) + if [attr for attr in self.__ireq__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_install(entry.tag, + entry.get('name')) return False else: if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really pacakges, so we have to do # something a little different. - # Check that the Package Level has what we need for verification. - if [attr for attr in self.__new_gpg_ireq__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot install" \ - % (entry.tag, entry.get('name'))) + # check that the Package level has + # what we need for verification. 
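
The canInstall()/canVerify() bodies that follow all repeat one pattern: look up the attributes required for the entry (or Instance) tag and refuse if any are missing from the element. A minimal, illustrative version of that check (not part of the patch), using the same shape as the __new_ireq__ dictionary defined earlier in this file:

    # Illustrative: the attribute-completeness test used by canInstall()/canVerify().
    required = {'Package': ['uri'], 'Instance': ['simplefile']}

    def missing_attrs(tag, attrib):
        return [attr for attr in required.get(tag, []) if attr not in attrib]

    print(missing_attrs('Instance', {'version': '1.0'}))       # ['simplefile']
    print(missing_attrs('Instance', {'simplefile': 'a.rpm'}))  # []

A non-empty result corresponds to the "Incomplete information ... cannot install/verify" log messages emitted by the helpers above.
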
+ if [attr for attr in self.__new_gpg_ireq__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_install(entry.tag, + entry.get('name')) return False - # Check that the Instance Level has what we need for verification. + # check that the Instance level has + # what we need for verification. for inst in instances: - if [attr for attr in self.__new_gpg_ireq__[inst.tag] \ - if attr not in inst.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot install"\ - % (inst.tag, entry.get('name'))) + if [attr for attr in self.__new_gpg_ireq__[inst.tag] + if attr not in inst.attrib]: + self._log_incomplete_entry_install(inst.tag, + entry.get('name')) return False else: # New format with Instances. - # Check that the Package Level has what we need for verification. - if [attr for attr in self.__new_ireq__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot install" \ - % (entry.tag, entry.get('name'))) - self.logger.error(" Required attributes that may not be present are %s" \ - % (self.__new_ireq__[entry.tag])) + # check that the Package level has + # what we need for verification. + if [attr for attr in self.__new_ireq__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_install(entry.tag, + entry.get('name')) + self.logger.error(" Required attributes that " + "may not be present are %s" % + (self.__new_ireq__[entry.tag])) return False - # Check that the Instance Level has what we need for verification. + # check that the Instance level has + # what we need for verification. for inst in instances: if inst.tag == 'Instance': - if [attr for attr in self.__new_ireq__[inst.tag] \ - if attr not in inst.attrib]: - self.logger.error("Incomplete information for %s of package %s; cannot install" \ - % (inst.tag, entry.get('name'))) - self.logger.error(" Required attributes that may not be present are %s" \ + if [attr for attr in self.__new_ireq__[inst.tag] + if attr not in inst.attrib]: + self._log_incomplete_entry_install( + inst.tag, + entry.get('name')) + self.logger.error(" Required attributes " + "that may not be present are %s" % (self.__new_ireq__[inst.tag])) return False return True + def _log_incomplete_entry_verify(self, etag, ename): + self.logger.error("Incomplete information for entry %s:%s; " + "cannot verify" % (etag, ename)) + return + def canVerify(self, entry): """ Test if entry has enough information to be verified. @@ -775,13 +2011,15 @@ return False if 'failure' in entry.attrib: - self.logger.error("Entry %s:%s reports bind failure: %s" % \ - (entry.tag, entry.get('name'), entry.get('failure'))) + self.logger.error("Entry %s:%s reports bind failure: %s" % + (entry.tag, entry.get('name'), + entry.get('failure'))) return False - # We don't want to do any checks so we don't care what the entry has in it. - if (not self.pkg_checks or - entry.get('pkg_checks', 'true').lower() == 'false'): + # we don't want to do any checks so + # we don't care what the entry has in it. + if (not Bcfg2.Options.setup.rpm_pkg_checks or + entry.get('pkg_checks', 'true').lower() == 'false'): return True instances = entry.findall('Instance') @@ -791,53 +2029,72 @@ if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really pacakges, so we have to do # something a little different. - # Check that the Package Level has what we need for verification. 
- if [attr for attr in self.__gpg_req__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (entry.tag, entry.get('name'))) + # check that the Package level has + # what we need for verification. + if [attr for attr in self.__gpg_req__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_verify(entry.tag, + entry.get('name')) return False elif entry.tag == 'Path' and entry.get('type') == 'ignore': # ignored Paths are only relevant during failed package # verification pass else: - if [attr for attr in self.__req__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (entry.tag, entry.get('name'))) + if [attr for attr in self.__req__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_verify(entry.tag, + entry.get('name')) return False else: if entry.get('name') == 'gpg-pubkey': # gpg-pubkey packages aren't really pacakges, so we have to do # something a little different. - # Check that the Package Level has what we need for verification. - if [attr for attr in self.__new_gpg_req__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (entry.tag, entry.get('name'))) + # check that the Package level has + # what we need for verification. + if [attr for attr in self.__new_gpg_req__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_verify(entry.tag, + entry.get('name')) return False - # Check that the Instance Level has what we need for verification. + # check that the Instance level has + # what we need for verification. for inst in instances: - if [attr for attr in self.__new_gpg_req__[inst.tag] \ - if attr not in inst.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (inst.tag, inst.get('name'))) + if [attr for attr in self.__new_gpg_req__[inst.tag] + if attr not in inst.attrib]: + self._log_incomplete_entry_verify(inst.tag, + inst.get('name')) return False else: - # New format with Instances, or old style modified. - # Check that the Package Level has what we need for verification. - if [attr for attr in self.__new_req__[entry.tag] if attr not in entry.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (entry.tag, entry.get('name'))) + # new format with Instances, or old style modified. + # check that the Package level has + # what we need for verification. + if [attr for attr in self.__new_req__[entry.tag] + if attr not in entry.attrib]: + self._log_incomplete_entry_verify(entry.tag, + entry.get('name')) return False - # Check that the Instance Level has what we need for verification. + # check that the Instance level has + # what we need for verification. 
for inst in instances: if inst.tag == 'Instance': - if [attr for attr in self.__new_req__[inst.tag] \ - if attr not in inst.attrib]: - self.logger.error("Incomplete information for entry %s:%s; cannot verify" \ - % (inst.tag, inst.get('name'))) + if [attr for attr in self.__new_req__[inst.tag] + if attr not in inst.attrib]: + self._log_incomplete_entry_verify(inst.tag, + inst.get('name')) return False return True + def _get_tmp_entry(self, extra_entry, inst): + tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', + version=inst.get('version'), + release=inst.get('release')) + if inst.get('epoch', None) is not None: + tmp_entry.set('epoch', str(inst.get('epoch'))) + if installed_inst.get('arch', None) is not None: + tmp_entry.set('arch', inst.get('arch')) + return + def FindExtra(self): """Find extra packages.""" packages = [entry.get('name') for entry in self.getSupportedEntries()] @@ -845,22 +2102,17 @@ for (name, instances) in list(self.installed.items()): if name not in packages: - extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) + extra_entry = Bcfg2.Client.XML.Element('Package', + name=name, + type=self.pkgtype) for installed_inst in instances: - if self.setup['extra']: - self.logger.info("Extra Package %s %s." % \ + if Bcfg2.Options.setup.extra: + self.logger.info("Extra Package %s %s." % (name, self.str_evra(installed_inst))) - tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \ - version = installed_inst.get('version'), \ - release = installed_inst.get('release')) - if installed_inst.get('epoch', None) != None: - tmp_entry.set('epoch', str(installed_inst.get('epoch'))) - if installed_inst.get('arch', None) != None: - tmp_entry.set('arch', installed_inst.get('arch')) + self._get_tmp_entry(extra_entry, installed_inst) extras.append(extra_entry) return extras - def FindExtraInstances(self, pkg_entry, installed_entry): """ Check for installed instances that are not in the config. @@ -869,8 +2121,11 @@ """ name = pkg_entry.get('name') - extra_entry = Bcfg2.Client.XML.Element('Package', name=name, type=self.pkgtype) - instances = [inst for inst in pkg_entry if inst.tag == 'Instance' or inst.tag == 'Package'] + extra_entry = Bcfg2.Client.XML.Element('Package', + name=name, + type=self.pkgtype) + instances = [inst for inst in pkg_entry if + inst.tag == 'Instance' or inst.tag == 'Package'] if name in self.installOnlyPkgs: for installed_inst in installed_entry: not_found = True @@ -879,36 +2134,25 @@ self.inst_evra_equal(inst, installed_inst): not_found = False break - if not_found == True: + if not_found: # Extra package. - self.logger.info("Extra InstallOnlyPackage %s %s." % \ + self.logger.info("Extra InstallOnlyPackage %s %s." % (name, self.str_evra(installed_inst))) - tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \ - version = installed_inst.get('version'), \ - release = installed_inst.get('release')) - if installed_inst.get('epoch', None) != None: - tmp_entry.set('epoch', str(installed_inst.get('epoch'))) - if installed_inst.get('arch', None) != None: - tmp_entry.set('arch', installed_inst.get('arch')) + self._get_tmp_entry(extra_entry, installed_inst) else: # Normal package, only check arch. 
for installed_inst in installed_entry: not_found = True for inst in instances: - if installed_inst.get('arch', None) == inst.get('arch', None) or\ - inst.tag == 'Package': + if (installed_inst.get('arch', None) == + inst.get('arch', None) or + inst.tag == 'Package'): not_found = False break if not_found: - self.logger.info("Extra Normal Package Instance %s %s" % \ + self.logger.info("Extra Normal Package Instance %s %s" % (name, self.str_evra(installed_inst))) - tmp_entry = Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', \ - version = installed_inst.get('version'), \ - release = installed_inst.get('release')) - if installed_inst.get('epoch', None) != None: - tmp_entry.set('epoch', str(installed_inst.get('epoch'))) - if installed_inst.get('arch', None) != None: - tmp_entry.set('arch', installed_inst.get('arch')) + self._get_tmp_entry(extra_entry, installed_inst) if len(extra_entry) == 0: extra_entry = None @@ -932,9 +2176,10 @@ Compare old style entry to installed entry. Which means ignore the epoch and arch. ''' - if (config_entry.tag == 'Package' and \ - config_entry.get('version') == installed_entry.get('version') and \ - config_entry.get('release') == installed_entry.get('release')): + if (config_entry.tag == 'Package' and + config_entry.get('version') == installed_entry.get('version') + and + config_entry.get('release') == installed_entry.get('release')): return True else: return False @@ -942,18 +2187,19 @@ def inst_evra_equal(self, config_entry, installed_entry): """Compare new style instance to installed entry.""" - if config_entry.get('epoch', None) != None: + if config_entry.get('epoch', None) is not None: epoch = int(config_entry.get('epoch')) else: epoch = None - if (config_entry.tag == 'Instance' and \ - (epoch == installed_entry.get('epoch', 0) or \ - (epoch == 0 and installed_entry.get('epoch', 0) == None) or \ - (epoch == None and installed_entry.get('epoch', 0) == 0)) and \ - config_entry.get('version') == installed_entry.get('version') and \ - config_entry.get('release') == installed_entry.get('release') and \ - config_entry.get('arch', None) == installed_entry.get('arch', None)): + if (config_entry.tag == 'Instance' and + (epoch == installed_entry.get('epoch', 0) or + (epoch == 0 and installed_entry.get('epoch', 0) is None) or + (epoch is None and installed_entry.get('epoch', 0) == 0)) and + config_entry.get('version') == installed_entry.get('version') and + config_entry.get('release') == installed_entry.get('release') and + config_entry.get('arch', None) == installed_entry.get('arch', + None)): return True else: return False @@ -966,10 +2212,10 @@ (big-endian) of the key ID which is good enough for our purposes. 
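
The trickiest part of inst_evra_equal() above is the epoch handling: a configured epoch of None and an installed epoch of 0 are treated as equivalent in either direction. Isolated into a standalone, illustrative sketch (not part of the patch):

    # Illustrative: the None/0 epoch equivalence used by inst_evra_equal().
    def epochs_match(config_epoch, installed_epoch):
        epoch = int(config_epoch) if config_epoch is not None else None
        return (epoch == installed_epoch or
                (epoch == 0 and installed_epoch is None) or
                (epoch is None and installed_epoch == 0))

    print(epochs_match(None, 0))    # True
    print(epochs_match('0', None))  # True
    print(epochs_match('1', 0))     # False

The version, release and arch comparisons are plain string equality on top of this.
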
""" - init_ts = rpmtools.rpmtransactionset() - init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS|rpm._RPMVSF_NOSIGNATURES) - gpg_hdrs = rpmtools.getheadersbykeyword(init_ts, **{'name':'gpg-pubkey'}) - keyids = [ header[rpm.RPMTAG_VERSION] for header in gpg_hdrs] + init_ts = rpmtransactionset() + init_ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES) + gpg_hdrs = getheadersbykeyword(init_ts, **{'name': 'gpg-pubkey'}) + keyids = [header[rpm.RPMTAG_VERSION] for header in gpg_hdrs] keyids.append('None') init_ts.closeDB() del init_ts diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/rpmtools.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/rpmtools.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/rpmtools.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/rpmtools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1091 +0,0 @@ -#!/usr/bin/env python -""" - Module that uses rpm-python to implement the following rpm - functionality for the bcfg2 RPM and YUM client drivers: - - rpm -qa - rpm --verify - rpm --erase - - The code closely follows the rpm C code. - - The code was written to be used in the bcfg2 RPM/YUM drivers. - - Some command line options have been provided to assist with - testing and development, but the output isn't pretty and looks - nothing like rpm output. - - Run 'rpmtools' -h for the options. - -""" - -import grp -import optparse -import os -import pwd -import rpm -import stat -import sys -if sys.version_info >= (2, 5): - import hashlib - py24compat = False -else: - # FIXME: Remove when client python dep is 2.5 or greater - py24compat = True - import md5 - -# Determine what prelink tools we have available. -# The isprelink module is a python extension that examines the ELF headers -# to see if the file has been prelinked. If it is not present a lot of files -# are unnecessarily run through the prelink command. -try: - from isprelink import * - isprelink_imported = True -except ImportError: - isprelink_imported = False - -# If the prelink command is installed on the system then we need to do -# prelink -y on files. -if os.access('/usr/sbin/prelink', os.X_OK): - prelink_exists = True -else: - prelink_exists = False - -# If we don't have isprelink then we will use the prelink configuration file to -# filter what we have to put through prelink -y. -import re -blacklist = [] -whitelist = [] -try: - f = open('/etc/prelink.conf', mode='r') - for line in f: - if line.startswith('#'): - continue - option, pattern = line.split() - if pattern.startswith('*.'): - pattern = pattern.replace('*.', '\.') - pattern += '$' - elif pattern.startswith('/'): - pattern = '^' + pattern - if option == '-b': - blacklist.append(pattern) - elif option == '-l': - whitelist.append(pattern) - f.close() -except IOError: - pass - -blacklist_re = re.compile('|'.join(blacklist)) -whitelist_re = re.compile('|'.join(whitelist)) - -# Flags that are not defined in rpm-python. -# They are defined in lib/rpmcli.h -# Bit(s) for verifyFile() attributes. 
-# -RPMVERIFY_NONE = 0 # /*!< */ -RPMVERIFY_MD5 = 1 # 1 << 0 # /*!< from %verify(md5) */ -RPMVERIFY_FILESIZE = 2 # 1 << 1 # /*!< from %verify(size) */ -RPMVERIFY_LINKTO = 4 # 1 << 2 # /*!< from %verify(link) */ -RPMVERIFY_USER = 8 # 1 << 3 # /*!< from %verify(user) */ -RPMVERIFY_GROUP = 16 # 1 << 4 # /*!< from %verify(group) */ -RPMVERIFY_MTIME = 32 # 1 << 5 # /*!< from %verify(mtime) */ -RPMVERIFY_MODE = 64 # 1 << 6 # /*!< from %verify(mode) */ -RPMVERIFY_RDEV = 128 # 1 << 7 # /*!< from %verify(rdev) */ -RPMVERIFY_CONTEXTS = 32768 # (1 << 15) # /*!< from --nocontexts */ -RPMVERIFY_READLINKFAIL = 268435456 # (1 << 28) # /*!< readlink failed */ -RPMVERIFY_READFAIL = 536870912 # (1 << 29) # /*!< file read failed */ -RPMVERIFY_LSTATFAIL = 1073741824 # (1 << 30) # /*!< lstat failed */ -RPMVERIFY_LGETFILECONFAIL = 2147483648 # (1 << 31) # /*!< lgetfilecon failed */ - -RPMVERIFY_FAILURES = \ - (RPMVERIFY_LSTATFAIL|RPMVERIFY_READFAIL|RPMVERIFY_READLINKFAIL| \ - RPMVERIFY_LGETFILECONFAIL) - -# Bit(s) to control rpm_verify() operation. -# -VERIFY_DEFAULT = 0, # /*!< */ -VERIFY_MD5 = 1 << 0 # /*!< from --nomd5 */ -VERIFY_SIZE = 1 << 1 # /*!< from --nosize */ -VERIFY_LINKTO = 1 << 2 # /*!< from --nolinkto */ -VERIFY_USER = 1 << 3 # /*!< from --nouser */ -VERIFY_GROUP = 1 << 4 # /*!< from --nogroup */ -VERIFY_MTIME = 1 << 5 # /*!< from --nomtime */ -VERIFY_MODE = 1 << 6 # /*!< from --nomode */ -VERIFY_RDEV = 1 << 7 # /*!< from --nodev */ -# /* bits 8-14 unused, reserved for rpmVerifyAttrs */ -VERIFY_CONTEXTS = 1 << 15 # /*!< verify: from --nocontexts */ -VERIFY_FILES = 1 << 16 # /*!< verify: from --nofiles */ -VERIFY_DEPS = 1 << 17 # /*!< verify: from --nodeps */ -VERIFY_SCRIPT = 1 << 18 # /*!< verify: from --noscripts */ -VERIFY_DIGEST = 1 << 19 # /*!< verify: from --nodigest */ -VERIFY_SIGNATURE = 1 << 20 # /*!< verify: from --nosignature */ -VERIFY_PATCHES = 1 << 21 # /*!< verify: from --nopatches */ -VERIFY_HDRCHK = 1 << 22 # /*!< verify: from --nohdrchk */ -VERIFY_FOR_LIST = 1 << 23 # /*!< query: from --list */ -VERIFY_FOR_STATE = 1 << 24 # /*!< query: from --state */ -VERIFY_FOR_DOCS = 1 << 25 # /*!< query: from --docfiles */ -VERIFY_FOR_CONFIG = 1 << 26 # /*!< query: from --configfiles */ -VERIFY_FOR_DUMPFILES = 1 << 27 # /*!< query: from --dump */ -# /* bits 28-31 used in rpmVerifyAttrs */ - -# Comes from C cource. lib/rpmcli.h -VERIFY_ATTRS = \ - (VERIFY_MD5 | VERIFY_SIZE | VERIFY_LINKTO | VERIFY_USER | VERIFY_GROUP | \ - VERIFY_MTIME | VERIFY_MODE | VERIFY_RDEV | VERIFY_CONTEXTS) - -VERIFY_ALL = \ - (VERIFY_ATTRS | VERIFY_FILES | VERIFY_DEPS | VERIFY_SCRIPT | VERIFY_DIGEST |\ - VERIFY_SIGNATURE | VERIFY_HDRCHK) - - -# Some masks for what checks to NOT do on these file types. -# The C code actiually resets these up for every file. -DIR_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \ - RPMVERIFY_LINKTO) - -# These file types all have the same mask, but hopefully this will make the -# code more readable. -FIFO_FLAGS = CHR_FLAGS = BLK_FLAGS = GHOST_FLAGS = DIR_FLAGS - -LINK_FLAGS = ~(RPMVERIFY_MD5 | RPMVERIFY_FILESIZE | RPMVERIFY_MTIME | \ - RPMVERIFY_MODE | RPMVERIFY_USER | RPMVERIFY_GROUP) - -REG_FLAGS = ~(RPMVERIFY_LINKTO) - - -def s_isdev(mode): - """ - Check to see if a file is a device. - - """ - return stat.S_ISBLK(mode) | stat.S_ISCHR(mode) - -def rpmpackagelist(rts): - """ - Equivalent of rpm -qa. Intended for RefreshPackages() in the RPM Driver. - Requires rpmtransactionset() to be run first to get a ts. - Returns a list of pkgspec dicts. - - e.g. 
[ {'name':'foo', 'epoch':'20', 'version':'1.2', 'release':'5', 'arch':'x86_64' }, - {'name':'bar', 'epoch':'10', 'version':'5.2', 'release':'2', 'arch':'x86_64' } ] - - """ - return [{'name':header[rpm.RPMTAG_NAME], - 'epoch':header[rpm.RPMTAG_EPOCH], - 'version':header[rpm.RPMTAG_VERSION], - 'release':header[rpm.RPMTAG_RELEASE], - 'arch':header[rpm.RPMTAG_ARCH], - 'gpgkeyid':header.sprintf("%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|").split()[-1]} - for header in rts.dbMatch()] - -def getindexbykeyword(index_ts, **kwargs): - """ - Return list of indexs from the rpmdb matching keywords - ex: getHeadersByKeyword(name='foo', version='1', release='1') - - Can be passed any structure that can be indexed by the pkgspec - keyswords as other keys are filtered out. - - """ - lst = [] - name = kwargs.get('name') - if name: - index_mi = index_ts.dbMatch(rpm.RPMTAG_NAME, name) - else: - index_mi = index_ts.dbMatch() - - if 'epoch' in kwargs: - if kwargs['epoch'] != None and kwargs['epoch'] != 'None': - kwargs['epoch'] = int(kwargs['epoch']) - else: - del(kwargs['epoch']) - - keywords = [key for key in list(kwargs.keys()) \ - if key in ('name', 'epoch', 'version', 'release', 'arch')] - keywords_len = len(keywords) - for hdr in index_mi: - match = 0 - for keyword in keywords: - if hdr[keyword] == kwargs[keyword]: - match += 1 - if match == keywords_len: - lst.append(index_mi.instance()) - del index_mi - return lst - -def getheadersbykeyword(header_ts, **kwargs): - """ - Borrowed parts of this from from Yum. Need to fix it though. - Epoch is not handled right. - - Return list of headers from the rpmdb matching keywords - ex: getHeadersByKeyword(name='foo', version='1', release='1') - - Can be passed any structure that can be indexed by the pkgspec - keyswords as other keys are filtered out. - - """ - lst = [] - name = kwargs.get('name') - if name: - header_mi = header_ts.dbMatch(rpm.RPMTAG_NAME, name) - else: - header_mi = header_ts.dbMatch() - - if 'epoch' in kwargs: - if kwargs['epoch'] != None and kwargs['epoch'] != 'None': - kwargs['epoch'] = int(kwargs['epoch']) - else: - del(kwargs['epoch']) - - keywords = [key for key in list(kwargs.keys()) \ - if key in ('name', 'epoch', 'version', 'release', 'arch')] - keywords_len = len(keywords) - for hdr in header_mi: - match = 0 - for keyword in keywords: - if hdr[keyword] == kwargs[keyword]: - match += 1 - if match == keywords_len: - lst.append(hdr) - del header_mi - return lst - -def prelink_md5_check(filename): - """ - Checks if a file is prelinked. If it is run it through prelink -y - to get the unprelinked md5 and file size. - - Return 0 if the file was not prelinked, otherwise return the file size. - Always return the md5. 
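The idea in this docstring can be shown in a few standalone lines. The sketch below assumes /usr/sbin/prelink is installed and skips the isprelink and whitelist shortcuts that the real function performs, so treat it as a simplified illustration rather than a drop-in replacement:

import hashlib
import subprocess

def unprelinked_md5_and_size(path):
    # "prelink -y" writes the original, un-prelinked contents of the file
    # to stdout; hashing that stream reproduces the md5 that rpm recorded
    # at install time.
    proc = subprocess.Popen(['/usr/sbin/prelink', '-y', path],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.DEVNULL)
    digest = hashlib.md5()
    size = 0
    for chunk in iter(lambda: proc.stdout.read(65536), b''):
        size += len(chunk)
        digest.update(chunk)
    proc.wait()
    return digest.hexdigest(), size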
- - """ - prelink = False - try: - plf = open(filename, "rb") - except IOError: - return False, 0 - - if prelink_exists: - if isprelink_imported: - plfd = plf.fileno() - if isprelink(plfd): - plf.close() - cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ - % (re.escape(filename)) - plf = os.popen(cmd, 'rb') - prelink = True - elif whitelist_re.search(filename) and not blacklist_re.search(filename): - plf.close() - cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ - % (re.escape(filename)) - plf = os.popen(cmd, 'rb') - prelink = True - - fsize = 0 - if py24compat: - chksum = md5.new() - else: - chksum = hashlib.md5() - while 1: - data = plf.read() - if not data: - break - fsize += len(data) - chksum.update(data) - plf.close() - file_md5 = chksum.hexdigest() - if prelink: - return file_md5, fsize - else: - return file_md5, 0 - -def prelink_size_check(filename): - """ - This check is only done if the prelink_md5_check() is not done first. - - Checks if a file is prelinked. If it is run it through prelink -y - to get the unprelinked file size. - - Return 0 if the file was not prelinked, otherwise return the file size. - - """ - fsize = 0 - try: - plf = open(filename, "rb") - except IOError: - return False - - if prelink_exists: - if isprelink_imported: - plfd = plf.fileno() - if isprelink(plfd): - plf.close() - cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ - % (re.escape(filename)) - plf = os.popen(cmd, 'rb') - - while 1: - data = plf.read() - if not data: - break - fsize += len(data) - - elif whitelist_re.search(filename) and not blacklist_re.search(filename): - plf.close() - cmd = '/usr/sbin/prelink -y %s 2> /dev/null' \ - % (re.escape(filename)) - plf = os.popen(cmd, 'rb') - - while 1: - data = plf.read() - if not data: - break - fsize += len(data) - - plf.close() - - return fsize - -def debug_verify_flags(vflags): - """ - Decodes the verify flags bits. - """ - if vflags & RPMVERIFY_MD5: - print('RPMVERIFY_MD5') - if vflags & RPMVERIFY_FILESIZE: - print('RPMVERIFY_FILESIZE') - if vflags & RPMVERIFY_LINKTO: - print('RPMVERIFY_LINKTO') - if vflags & RPMVERIFY_USER: - print('RPMVERIFY_USER') - if vflags & RPMVERIFY_GROUP: - print('RPMVERIFY_GROUP') - if vflags & RPMVERIFY_MTIME: - print('RPMVERIFY_MTIME') - if vflags & RPMVERIFY_MODE: - print('RPMVERIFY_MODE') - if vflags & RPMVERIFY_RDEV: - print('RPMVERIFY_RDEV') - if vflags & RPMVERIFY_CONTEXTS: - print('RPMVERIFY_CONTEXTS') - if vflags & RPMVERIFY_READLINKFAIL: - print('RPMVERIFY_READLINKFAIL') - if vflags & RPMVERIFY_READFAIL: - print('RPMVERIFY_READFAIL') - if vflags & RPMVERIFY_LSTATFAIL: - print('RPMVERIFY_LSTATFAIL') - if vflags & RPMVERIFY_LGETFILECONFAIL: - print('RPMVERIFY_LGETFILECONFAIL') - -def debug_file_flags(fflags): - """ - Decodes the file flags bits. 
- """ - if fflags & rpm.RPMFILE_CONFIG: - print('rpm.RPMFILE_CONFIG') - - if fflags & rpm.RPMFILE_DOC: - print('rpm.RPMFILE_DOC') - - if fflags & rpm.RPMFILE_ICON: - print('rpm.RPMFILE_ICON') - - if fflags & rpm.RPMFILE_MISSINGOK: - print('rpm.RPMFILE_MISSINGOK') - - if fflags & rpm.RPMFILE_NOREPLACE: - print('rpm.RPMFILE_NOREPLACE') - - if fflags & rpm.RPMFILE_GHOST: - print('rpm.RPMFILE_GHOST') - - if fflags & rpm.RPMFILE_LICENSE: - print('rpm.RPMFILE_LICENSE') - - if fflags & rpm.RPMFILE_README: - print('rpm.RPMFILE_README') - - if fflags & rpm.RPMFILE_EXCLUDE: - print('rpm.RPMFILE_EXLUDE') - - if fflags & rpm.RPMFILE_UNPATCHED: - print('rpm.RPMFILE_UNPATCHED') - - if fflags & rpm.RPMFILE_PUBKEY: - print('rpm.RPMFILE_PUBKEY') - -def rpm_verify_file(fileinfo, rpmlinktos, omitmask): - """ - Verify all the files in a package. - - Returns a list of error flags, the file type and file name. The list - entries are strings that are the same as the labels for the bitwise - flags used in the C code. - - """ - (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \ - vflags, fuser, fgroup, fmd5) = fileinfo - - # 1. rpmtsRootDir stuff. What does it do and where to I get it from? - - file_results = [] - flags = vflags - - # Check to see if the file was installed - if not pretend all is ok. - # This is what the rpm C code does! - if fstate != rpm.RPMFILE_STATE_NORMAL: - return file_results - - # Get the installed files stats - try: - lstat = os.lstat(fname) - except OSError: - if not (fflags & (rpm.RPMFILE_MISSINGOK|rpm.RPMFILE_GHOST)): - file_results.append('RPMVERIFY_LSTATFAIL') - #file_results.append(fname) - return file_results - - # 5. Contexts? SELinux stuff? - - # Setup what checks to do. This is straight out of the C code. - if stat.S_ISDIR(lstat.st_mode): - flags &= DIR_FLAGS - elif stat.S_ISLNK(lstat.st_mode): - flags &= LINK_FLAGS - elif stat.S_ISFIFO(lstat.st_mode): - flags &= FIFO_FLAGS - elif stat.S_ISCHR(lstat.st_mode): - flags &= CHR_FLAGS - elif stat.S_ISBLK(lstat.st_mode): - flags &= BLK_FLAGS - else: - flags &= REG_FLAGS - - if (fflags & rpm.RPMFILE_GHOST): - flags &= GHOST_FLAGS - - flags &= ~(omitmask | RPMVERIFY_FAILURES) - - # 8. SELinux stuff. - - prelink_size = 0 - if flags & RPMVERIFY_MD5: - prelink_md5, prelink_size = prelink_md5_check(fname) - if prelink_md5 == False: - file_results.append('RPMVERIFY_MD5') - file_results.append('RPMVERIFY_READFAIL') - elif prelink_md5 != fmd5: - file_results.append('RPMVERIFY_MD5') - - if flags & RPMVERIFY_LINKTO: - linkto = os.readlink(fname) - if not linkto: - file_results.append('RPMVERIFY_READLINKFAIL') - file_results.append('RPMVERIFY_LINKTO') - else: - if len(rpmlinktos) == 0 or linkto != rpmlinktos: - file_results.append('RPMVERIFY_LINKTO') - - if flags & RPMVERIFY_FILESIZE: - if not (flags & RPMVERIFY_MD5): # prelink check hasn't been done. - prelink_size = prelink_size_check(fname) - if (prelink_size != 0): # This is a prelinked file. - if (prelink_size != fsize): - file_results.append('RPMVERIFY_FILESIZE') - elif lstat.st_size != fsize: # It wasn't a prelinked file. - file_results.append('RPMVERIFY_FILESIZE') - - if flags & RPMVERIFY_MODE: - metamode = fmode - filemode = lstat.st_mode - - # Comparing the type of %ghost files is meaningless, but perms are ok. 
- if fflags & rpm.RPMFILE_GHOST: - metamode &= ~0xf000 - filemode &= ~0xf000 - - if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \ - (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)): - file_results.append('RPMVERIFY_MODE') - - if flags & RPMVERIFY_RDEV: - if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode) or - stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)): - file_results.append('RPMVERIFY_RDEV') - elif (s_isdev(fmode) & s_isdev(lstat.st_mode)): - st_rdev = lstat.st_rdev - if frdev != st_rdev: - file_results.append('RPMVERIFY_RDEV') - - if flags & RPMVERIFY_MTIME: - if lstat.st_mtime != fmtime: - file_results.append('RPMVERIFY_MTIME') - - if flags & RPMVERIFY_USER: - try: - user = pwd.getpwuid(lstat.st_uid)[0] - except KeyError: - user = None - if not user or not fuser or (user != fuser): - file_results.append('RPMVERIFY_USER') - - if flags & RPMVERIFY_GROUP: - try: - group = grp.getgrgid(lstat.st_gid)[0] - except KeyError: - group = None - if not group or not fgroup or (group != fgroup): - file_results.append('RPMVERIFY_GROUP') - - return file_results - -def rpm_verify_dependencies(header): - """ - Check package dependencies. Header is an rpm.hdr. - - Don't like opening another ts to do this, but - it was the only way I could find of clearing the ts - out. - - Have asked on the rpm-maint list on how to do - this the right way (28 Feb 2007). - - ts.check() returns: - - ((name, version, release), (reqname, reqversion), \ - flags, suggest, sense) - - """ - _ts1 = rpmtransactionset() - _ts1.addInstall(header, 'Dep Check', 'i') - dep_errors = _ts1.check() - _ts1.closeDB() - return dep_errors - -def rpm_verify_package(vp_ts, header, verify_options): - """ - Verify a single package specified by header. Header is an rpm.hdr. - - If errors are found it returns a dictionary of errors. - - """ - # Set some transaction level flags. - vsflags = 0 - if 'nodigest' in verify_options: - vsflags |= rpm._RPMVSF_NODIGESTS - if 'nosignature' in verify_options: - vsflags |= rpm._RPMVSF_NOSIGNATURES - ovsflags = vp_ts.setVSFlags(vsflags) - - # Map from the Python options to the rpm bitwise flags. - omitmask = 0 - - if 'nolinkto' in verify_options: - omitmask |= VERIFY_LINKTO - if 'nomd5' in verify_options: - omitmask |= VERIFY_MD5 - if 'nosize' in verify_options: - omitmask |= VERIFY_SIZE - if 'nouser' in verify_options: - omitmask |= VERIFY_USER - if 'nogroup' in verify_options: - omitmask |= VERIFY_GROUP - if 'nomtime' in verify_options: - omitmask |= VERIFY_MTIME - if 'nomode' in verify_options: - omitmask |= VERIFY_MODE - if 'nordev' in verify_options: - omitmask |= VERIFY_RDEV - - omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS) - - package_results = {} - - # Check Signatures and Digests. - # No idea what this might return. Need to break something to see. - # Setting the vsflags above determines what gets checked in the header. - hdr_stat = vp_ts.hdrCheck(header.unload()) - if hdr_stat: - package_results['hdr'] = hdr_stat - - # Check Package Depencies. - if 'nodeps' not in verify_options: - dep_stat = rpm_verify_dependencies(header) - if dep_stat: - package_results['deps'] = dep_stat - - # Check all the package files. - if 'nofiles' not in verify_options: - vp_fi = header.fiFromHeader() - for fileinfo in vp_fi: - # Do not bother doing anything with ghost files. - # This is what RPM does. - if fileinfo[4] & rpm.RPMFILE_GHOST: - continue - - # This is only needed because of an inconsistency in the - # rpm.fi interface. 
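One line earlier in rpm_verify_package() deserves a note: omitmask = ((~omitmask & VERIFY_ATTRS) ^ VERIFY_ATTRS) simply restricts the requested skip-bits to the per-file attribute bits. A tiny worked example with an abbreviated constant set (the values below are made up for illustration) shows the equivalence:

VERIFY_MD5 = 1 << 0
VERIFY_SIZE = 1 << 1
VERIFY_USER = 1 << 3
VERIFY_ATTRS = VERIFY_MD5 | VERIFY_SIZE | VERIFY_USER  # abbreviated set

requested = VERIFY_MD5 | (1 << 17)  # 'nomd5' plus a non-attribute bit
omitmask = ((~requested & VERIFY_ATTRS) ^ VERIFY_ATTRS)
assert omitmask == (requested & VERIFY_ATTRS) == VERIFY_MD5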
- linktos = vp_fi.FLink() - - file_stat = rpm_verify_file(fileinfo, linktos, omitmask) - - #if len(file_stat) > 0 or options.verbose: - if len(file_stat) > 0: - fflags = fileinfo[4] - if fflags & rpm.RPMFILE_CONFIG: - file_stat.append('c') - elif fflags & rpm.RPMFILE_DOC: - file_stat.append('d') - elif fflags & rpm.RPMFILE_GHOST: - file_stat.append('g') - elif fflags & rpm.RPMFILE_LICENSE: - file_stat.append('l') - elif fflags & rpm.RPMFILE_PUBKEY: - file_stat.append('P') - elif fflags & rpm.RPMFILE_README: - file_stat.append('r') - else: - file_stat.append(' ') - - file_stat.append(fileinfo[0]) # The filename. - package_results.setdefault('files', []).append(file_stat) - - # Run the verify script if there is one. - # Do we want this? - #if 'noscripts' not in verify_options: - # script_stat = rpmVerifyscript() - # if script_stat: - # package_results['script'] = script_stat - - # If there have been any errors, add the package nevra to the result. - if len(package_results) > 0: - package_results.setdefault('nevra', (header[rpm.RPMTAG_NAME], \ - header[rpm.RPMTAG_EPOCH], \ - header[rpm.RPMTAG_VERSION], \ - header[rpm.RPMTAG_RELEASE], \ - header[rpm.RPMTAG_ARCH])) - else: - package_results = None - - # Put things back the way we found them. - vsflags = vp_ts.setVSFlags(ovsflags) - - return package_results - -def rpm_verify(verify_ts, verify_pkgspec, verify_options=[]): - """ - Requires rpmtransactionset() to be run first to get a ts. - - pkgspec is a dict specifying the package - e.g.: - For a single package - { name='foo', epoch='20', version='1', release='1', arch='x86_64'} - - For all packages - {} - - Or any combination of keywords to select one or more packages to verify. - - options is a list of 'rpm --verify' options. Default is to check everything. - e.g.: - [ 'nodeps', 'nodigest', 'nofiles', 'noscripts', 'nosignature', - 'nolinkto' 'nomd5', 'nosize', 'nouser', 'nogroup', 'nomtime', - 'nomode', 'nordev' ] - - Returns a list. One list entry per package. Each list entry is a - dictionary. Dict keys are 'files', 'deps', 'nevra' and 'hdr'. - Entries only get added for the failures. If nothing failed, None is - returned. - - Its all a bit messy and probably needs reviewing. - - [ { 'hdr': [???], - 'deps: [((name, version, release), (reqname, reqversion), - flags, suggest, sense), .... ] - 'files': [ ['filename1', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER' ], - ['filename2', 'RPMVERFIY_LSTATFAIL']] - 'nevra': ['name1', 'epoch1', 'version1', 'release1', 'arch1'] } - { 'hdr': [???], - 'deps: [((name, version, release), (reqname, reqversion), - flags, suggest, sense), .... ] - 'files': [ ['filename', 'RPMVERIFY_GROUP', 'RPMVERIFY_USER" ], - ['filename2', 'RPMVERFIY_LSTATFAIL']] - 'nevra': ['name2', 'epoch2', 'version2', 'release2', 'arch2'] } ] - - """ - verify_results = [] - headers = getheadersbykeyword(verify_ts, **verify_pkgspec) - for header in headers: - result = rpm_verify_package(verify_ts, header, verify_options) - if result: - verify_results.append(result) - - return verify_results - -def rpmtransactionset(): - """ - A simple wrapper for rpm.TransactionSet() to keep everthiing together. - Might use it to set some ts level flags later. - - """ - ts = rpm.TransactionSet() - return ts - -class Rpmtscallback(object): - """ - Callback for ts.run(). Used for adding, upgrading and removing packages. - Starting with all possible reasons codes, but bcfg2 will probably only - make use of a few of them. - - Mostly just printing stuff at the moment to understand how the callback - is used. 
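For readers who want to consume the structure documented in rpm_verify() above, a minimal traversal looks like this (illustration only; the function name is ours, and it relies on the convention that the file type flag and the filename are appended as the last two elements of each per-file entry):

def summarize_verify_results(verify_results):
    for pkg in verify_results:
        name, epoch, version, release, arch = pkg['nevra']
        for file_result in pkg.get('files', []):
            fname = file_result[-1]
            ftype = file_result[-2]
            failures = ','.join(file_result[:-2])
            print('%s-%s:%s-%s.%s %s %s %s'
                  % (name, epoch, version, release, arch,
                     ftype, fname, failures))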
- - """ - def __init__(self): - self.fdnos = {} - - def callback(self, reason, amount, total, key, client_data): - """ - Generic rpmts call back. - """ - if reason == rpm.RPMCALLBACK_INST_OPEN_FILE: - pass - elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE: - pass - elif reason == rpm.RPMCALLBACK_INST_START: - pass - elif reason == rpm.RPMCALLBACK_TRANS_PROGRESS or \ - reason == rpm.RPMCALLBACK_INST_PROGRESS: - pass - # rpm.RPMCALLBACK_INST_PROGRESS' - elif reason == rpm.RPMCALLBACK_TRANS_START: - pass - elif reason == rpm.RPMCALLBACK_TRANS_STOP: - pass - elif reason == rpm.RPMCALLBACK_REPACKAGE_START: - pass - elif reason == rpm.RPMCALLBACK_REPACKAGE_PROGRESS: - pass - elif reason == rpm.RPMCALLBACK_REPACKAGE_STOP: - pass - elif reason == rpm.RPMCALLBACK_UNINST_PROGRESS: - pass - elif reason == rpm.RPMCALLBACK_UNINST_START: - pass - elif reason == rpm.RPMCALLBACK_UNINST_STOP: - pass - # How do we get at this? - # RPM.modified += key - elif reason == rpm.RPMCALLBACK_UNPACK_ERROR: - pass - elif reason == rpm.RPMCALLBACK_CPIO_ERROR: - pass - elif reason == rpm.RPMCALLBACK_UNKNOWN: - pass - else: - print('ERROR - Fell through callBack') - - -def rpm_erase(erase_pkgspecs, erase_flags): - """ - pkgspecs is a list of pkgspec dicts specifying packages - e.g.: - For a single package - { name='foo', epoch='20', version='1', release='1', arch='x86_64'} - - """ - erase_ts_flags = 0 - if 'noscripts' in erase_flags: - erase_ts_flags |= rpm.RPMTRANS_FLAG_NOSCRIPTS - if 'notriggers' in erase_flags: - erase_ts_flags |= rpm.RPMTRANS_FLAG_NOTRIGGERS - if 'repackage' in erase_flags: - erase_ts_flags |= rpm.RPMTRANS_FLAG_REPACKAGE - - erase_ts = rpmtransactionset() - erase_ts.setFlags(erase_ts_flags) - - for pkgspec in erase_pkgspecs: - idx_list = getindexbykeyword(erase_ts, **pkgspec) - if len(idx_list) > 1 and not 'allmatches' in erase_flags: - #pass - print('ERROR - Multiple package match for erase', pkgspec) - else: - for idx in idx_list: - erase_ts.addErase(idx) - - #for te in erase_ts: - - erase_problems = [] - if 'nodeps' not in erase_flags: - erase_problems = erase_ts.check() - - if erase_problems == []: - erase_ts.order() - erase_callback = Rpmtscallback() - erase_ts.run(erase_callback.callback, 'Erase') - #else: - - erase_ts.closeDB() - del erase_ts - return erase_problems - -def display_verify_file(file_results): - ''' - Display file results similar to rpm --verify. - ''' - filename = file_results[-1] - filetype = file_results[-2] - - result_string = '' - - if 'RPMVERIFY_LSTATFAIL' in file_results: - result_string = 'missing ' - else: - if 'RPMVERIFY_FILESIZE' in file_results: - result_string = result_string + 'S' - else: - result_string = result_string + '.' - - if 'RPMVERIFY_MODE' in file_results: - result_string = result_string + 'M' - else: - result_string = result_string + '.' - - if 'RPMVERIFY_MD5' in file_results: - if 'RPMVERIFY_READFAIL' in file_results: - result_string = result_string + '?' - else: - result_string = result_string + '5' - else: - result_string = result_string + '.' - - if 'RPMVERIFY_RDEV' in file_results: - result_string = result_string + 'D' - else: - result_string = result_string + '.' - - if 'RPMVERIFY_LINKTO' in file_results: - if 'RPMVERIFY_READLINKFAIL' in file_results: - result_string = result_string + '?' - else: - result_string = result_string + 'L' - else: - result_string = result_string + '.' - - if 'RPMVERIFY_USER' in file_results: - result_string = result_string + 'U' - else: - result_string = result_string + '.' 
- - if 'RPMVERIFY_GROUP' in file_results: - result_string = result_string + 'G' - else: - result_string = result_string + '.' - - if 'RPMVERIFY_MTIME' in file_results: - result_string = result_string + 'T' - else: - result_string = result_string + '.' - - print(result_string + ' ' + filetype + ' ' + filename) - sys.stdout.flush() - -#=============================================================================== -# Some options and output to assist with development and testing. -# These are not intended for normal use. -if __name__ == "__main__": - - p = optparse.OptionParser() - - p.add_option('--name', action='store', \ - default=None, \ - help='''Package name to verify. - - ****************************************** - NOT SPECIFYING A NAME MEANS 'ALL' PACKAGES. - ****************************************** - - The specified operation will be carried out on all - instances of packages that match the package specification - (name, epoch, version, release, arch).''') - - p.add_option('--epoch', action='store', \ - default=None, \ - help='''Package epoch.''') - - p.add_option('--version', action='store', \ - default=None, \ - help='''Package version.''') - - p.add_option('--release', action='store', \ - default=None, \ - help='''Package release.''') - - p.add_option('--arch', action='store', \ - default=None, \ - help='''Package arch.''') - - p.add_option('--erase', '-e', action='store_true', \ - default=None, \ - help='''**************************************************** - REMOVE PACKAGES. THERE ARE NO WARNINGS. MULTIPLE - PACKAGES WILL BE REMOVED IF A FULL PACKAGE SPEC IS NOT - GIVEN. E.G. IF JUST A NAME IS GIVEN ALL INSTALLED - INSTANCES OF THAT PACKAGE WILL BE REMOVED PROVIDED - DEPENDENCY CHECKS PASS. IF JUST AN EPOCH IS GIVEN - ALL PACKAGE INSTANCES WITH THAT EPOCH WILL BE REMOVED. - ****************************************************''') - - p.add_option('--list', '-l', action='store_true', \ - help='''List package identity info. rpm -qa ish equivalent - intended for use in RefreshPackages().''') - - p.add_option('--verify', action='store_true', \ - help='''Verify Package(s). Output is only produced after all - packages has been verified. Be patient.''') - - p.add_option('--verbose', '-v', action='store_true', \ - help='''Verbose output for --verify option. Output is the - same as rpm -v --verify.''') - - p.add_option('--nodeps', action='store_true', \ - default=False, \ - help='Do not do dependency testing.') - - p.add_option('--nodigest', action='store_true', \ - help='Do not check package digests.') - - p.add_option('--nofiles', action='store_true', \ - help='Do not do file checks.') - - p.add_option('--noscripts', action='store_true', \ - help='Do not run verification scripts.') - - p.add_option('--nosignature', action='store_true', \ - help='Do not do package signature verification.') - - p.add_option('--nolinkto', action='store_true', \ - help='Do not do symlink tests.') - - p.add_option('--nomd5', action='store_true', \ - help='''Do not do MD5 checksums on files. Note that this does - not work for prelink files yet.''') - - p.add_option('--nosize', action='store_true', \ - help='''Do not do file size tests. 
Note that this does not work - for prelink files yet.''') - - p.add_option('--nouser', action='store_true', \ - help='Do not check file user ownership.') - - p.add_option('--nogroup', action='store_true', \ - help='Do not check file group ownership.') - - p.add_option('--nomtime', action='store_true', \ - help='Do not check file modification times.') - - p.add_option('--nomode', action='store_true', \ - help='Do not check file modes (permissions).') - - p.add_option('--nordev', action='store_true', \ - help='Do not check device node.') - - p.add_option('--notriggers', action='store_true', \ - help='Do not do not generate triggers on erase.') - - p.add_option('--repackage', action='store_true', \ - help='''Do repackage on erase.i Packages are put - in /var/spool/repackage.''') - - p.add_option('--allmatches', action='store_true', \ - help='''Remove all package instances that match the - pkgspec. - - *************************************************** - NO WARNINGS ARE GIVEN. IF THERE IS NO PACKAGE SPEC - THAT MEANS ALL PACKAGES!!!! - ***************************************************''') - - options, arguments = p.parse_args() - - pkgspec = {} - rpm_options = [] - - if options.nodeps: - rpm_options.append('nodeps') - - if options.nodigest: - rpm_options.append('nodigest') - - if options.nofiles: - rpm_options.append('nofiles') - - if options.noscripts: - rpm_options.append('noscripts') - - if options.nosignature: - rpm_options.append('nosignature') - - if options.nolinkto: - rpm_options.append('nolinkto') - - if options.nomd5: - rpm_options.append('nomd5') - - if options.nosize: - rpm_options.append('nosize') - - if options.nouser: - rpm_options.append('nouser') - - if options.nogroup: - rpm_options.append('nogroup') - - if options.nomtime: - rpm_options.append('nomtime') - - if options.nomode: - rpm_options.append('nomode') - - if options.nordev: - rpm_options.append('nordev') - - if options.repackage: - rpm_options.append('repackage') - - if options.allmatches: - rpm_options.append('allmatches') - - main_ts = rpmtransactionset() - - cmdline_pkgspec = {} - if options.name != 'all': - if options.name: - cmdline_pkgspec['name'] = str(options.name) - if options.epoch: - cmdline_pkgspec['epoch'] = str(options.epoch) - if options.version: - cmdline_pkgspec['version'] = str(options.version) - if options.release: - cmdline_pkgspec['release'] = str(options.release) - if options.arch: - cmdline_pkgspec['arch'] = str(options.arch) - - if options.verify: - results = rpm_verify(main_ts, cmdline_pkgspec, rpm_options) - for r in results: - files = r.get('files', '') - for f in files: - display_verify_file(f) - - elif options.list: - for p in rpmpackagelist(main_ts): - print(p) - - elif options.erase: - if options.name: - rpm_erase([cmdline_pkgspec], rpm_options) - else: - print('You must specify the "--name" option') diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/SELinux.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/SELinux.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/SELinux.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/SELinux.py 2017-01-10 19:18:17.000000000 +0000 @@ -7,6 +7,7 @@ import glob import struct import socket +import logging import selinux import seobject import Bcfg2.Client.XML @@ -77,14 +78,13 @@ SEPort=['name', 'selinuxtype'], SEUser=['name', 'roles', 'prefix']) - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.Tool.__init__(self, logger, setup, config) + def __init__(self, config): + 
Bcfg2.Client.Tools.Tool.__init__(self, config) self.handlers = {} for handler in self.__handles__: etype = handler[0] self.handlers[etype] = \ - globals()["SELinux%sHandler" % etype.title()](self, logger, - setup, config) + globals()["SELinux%sHandler" % etype.title()](self, config) self.txn = False self.post_txn_queue = [] @@ -100,10 +100,6 @@ # http://docs.python.org/2/reference/datamodel.html#object.__getattr__ # for details - def BundleUpdated(self, _, states): - for handler in self.handlers.values(): - handler.BundleUpdated(states) - def FindExtra(self): extra = [] for handler in self.handlers.values(): @@ -119,7 +115,7 @@ in the specification """ return self.handlers[entry.tag].primarykey(entry) - def Install(self, entries, states): + def Install(self, entries): # start a transaction semanage = seobject.semanageRecords("") if hasattr(semanage, "start"): @@ -129,13 +125,14 @@ else: self.logger.debug("SELinux transactions not supported; this may " "slow things down considerably") - Bcfg2.Client.Tools.Tool.Install(self, entries, states) + states = Bcfg2.Client.Tools.Tool.Install(self, entries) if hasattr(semanage, "finish"): self.logger.debug("Committing SELinux transaction") semanage.finish() self.txn = False for func, arg, kwargs in self.post_txn_queue: states[arg] = func(*arg, **kwargs) + return states def GenericSEInstall(self, entry): """Dispatch install to the proper method according to entry tag""" @@ -144,7 +141,7 @@ def GenericSEVerify(self, entry, _): """Dispatch verify to the proper method according to entry tag""" rv = self.handlers[entry.tag].Verify(entry) - if entry.get('qtext') and self.setup['interactive']: + if entry.get('qtext') and Bcfg2.Options.setup.interactive: entry.set('qtext', '%s\nInstall %s: (y/N) ' % (entry.get('qtext'), @@ -174,10 +171,9 @@ custom_re = re.compile(r' (?P\S+)$') custom_format = None - def __init__(self, tool, logger, setup, config): + def __init__(self, tool, config): self.tool = tool - self.logger = logger - self.setup = setup + self.logger = logging.getLogger(self.__class__.__name__) self.config = config self._records = None self._all = None @@ -229,7 +225,7 @@ match = self.custom_re.search(cmd) if match: if (len(self.custom_format) == 1 and - self.custom_format[0] == "name"): + self.custom_format[0] == "name"): keys.append(match.group("name")) else: keys.append(tuple([match.group(k) @@ -379,11 +375,6 @@ for key in records.keys() if key not in specified] - def BundleUpdated(self, states): - """ perform any additional magic tasks that need to be run - when a bundle is updated """ - pass - class SELinuxSebooleanHandler(SELinuxEntryHandler): """ handle SELinux boolean entries """ @@ -631,8 +622,8 @@ etype = "user" value_format = ("prefix", None, None, "roles") - def __init__(self, tool, logger, setup, config): - SELinuxEntryHandler.__init__(self, tool, logger, setup, config) + def __init__(self, tool, config): + SELinuxEntryHandler.__init__(self, tool, config) self.needs_prefix = False @property @@ -725,9 +716,9 @@ etype = "module" value_format = (None, "disabled") - def __init__(self, tool, logger, setup, config): - SELinuxEntryHandler.__init__(self, tool, logger, setup, config) - self.filetool = POSIXFile(logger, setup, config) + def __init__(self, tool, config): + SELinuxEntryHandler.__init__(self, tool, config) + self.filetool = POSIXFile(config) try: self.setype = selinux.selinux_getpolicytype()[1] except IndexError: diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Systemd.py 
bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Systemd.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/Systemd.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/Systemd.py 2017-01-10 19:18:17.000000000 +0000 @@ -2,6 +2,8 @@ """This is systemd support.""" +import glob +import os import Bcfg2.Client.Tools import Bcfg2.Client.XML @@ -13,34 +15,93 @@ __handles__ = [('Service', 'systemd')] __req__ = {'Service': ['name', 'status']} + def get_svc_name(self, service): + """Append .service to name if name doesn't specify a unit type.""" + svc = service.get('name') + if svc.endswith(('.service', '.socket', '.device', '.mount', + '.automount', '.swap', '.target', '.path', + '.timer', '.snapshot', '.slice', '.scope')): + return svc + else: + return '%s.service' % svc + def get_svc_command(self, service, action): - return "/bin/systemctl %s %s.service" % (action, service.get('name')) + return "/bin/systemctl %s %s" % (action, self.get_svc_name(service)) def VerifyService(self, entry, _): """Verify Service status for entry.""" - if entry.get('status') == 'ignore': + entry.set('target_status', entry.get('status')) # for reporting + + bootstatus = self.get_bootstatus(entry) + if bootstatus is None: + # bootstatus is unspecified and status is ignore return True - cmd = "/bin/systemctl status %s.service " % (entry.get('name')) - rv = self.cmd.run(cmd) + if self.cmd.run(self.get_svc_command(entry, 'is-enabled')): + current_bootstatus = 'on' + else: + current_bootstatus = 'off' - if 'Loaded: error' in rv.stdout: - entry.set('current_status', 'off') - return False - elif 'Active: active' in rv.stdout: - entry.set('current_status', 'on') - return entry.get('status') == 'on' + if entry.get('status') == 'ignore': + return current_bootstatus == bootstatus + + cmd = self.get_svc_command(entry, 'show') + ' -p ActiveState' + rv = self.cmd.run(cmd) + if rv.stdout.strip() in ('ActiveState=active', + 'ActiveState=activating', + 'ActiveState=reloading'): + current_status = 'on' else: - entry.set('current_status', 'off') - return entry.get('status') == 'off' + current_status = 'off' + entry.set('current_status', current_status) + return (entry.get('status') == current_status and + bootstatus == current_bootstatus) def InstallService(self, entry): """Install Service entry.""" - if entry.get('status') == 'on': - rv = self.cmd.run(self.get_svc_command(entry, 'enable')).success - rv &= self.cmd.run(self.get_svc_command(entry, 'start')).success + self.logger.info("Installing Service %s" % (entry.get('name'))) + bootstatus = self.get_bootstatus(entry) + if bootstatus is None: + # bootstatus is unspecified and status is ignore + return True + + # Enable or disable the service + if bootstatus == 'on': + cmd = self.get_svc_command(entry, 'enable') else: - rv = self.cmd.run(self.get_svc_command(entry, 'stop')).success - rv &= self.cmd.run(self.get_svc_command(entry, 'disable')).success + cmd = self.get_svc_command(entry, 'disable') + if not self.cmd.run(cmd).success: + # Return failure immediately and do not start/stop the service. 
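The ActiveState convention that the new VerifyService() relies on can be exercised on its own. This standalone sketch (the helper name and the direct subprocess call are ours, not part of the patch) maps the three "running-ish" states to 'on' the same way the driver does:

import subprocess

def unit_status(unit):
    proc = subprocess.run(['/bin/systemctl', 'show', unit,
                           '-p', 'ActiveState'],
                          stdout=subprocess.PIPE, universal_newlines=True,
                          check=False)
    # systemctl prints a single line such as "ActiveState=active"
    if proc.stdout.strip() in ('ActiveState=active',
                               'ActiveState=activating',
                               'ActiveState=reloading'):
        return 'on'
    return 'off'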
+ return False + + # Start or stop the service, depending on the current service_mode + cmd = None + if Bcfg2.Options.setup.service_mode == 'disabled': + # 'disabled' means we don't attempt to modify running svcs + pass + elif Bcfg2.Options.setup.service_mode == 'build': + # 'build' means we attempt to stop all services started + if entry.get('current_status') == 'on': + cmd = self.get_svc_command(entry, 'stop') + else: + if entry.get('status') == 'on': + cmd = self.get_svc_command(entry, 'start') + elif entry.get('status') == 'off': + cmd = self.get_svc_command(entry, 'stop') + + if cmd: + return self.cmd.run(cmd).success + else: + return True - return rv + def FindExtra(self): + """Find Extra Systemd Service entries.""" + specified = [self.get_svc_name(entry) + for entry in self.getSupportedEntries()] + extra = set() + for fname in glob.glob("/etc/systemd/system/*.wants/*"): + name = os.path.basename(fname) + if name not in specified: + extra.add(name) + return [Bcfg2.Client.XML.Element('Service', name=name, type='systemd') + for name in list(extra)] diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/SYSV.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/SYSV.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/SYSV.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/SYSV.py 2017-01-10 19:18:17.000000000 +0000 @@ -34,8 +34,8 @@ pkgtype = 'sysv' pkgtool = ("/usr/sbin/pkgadd %s -n -d %%s", (('%s %s', ['url', 'name']))) - def __init__(self, logger, setup, config): - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + def __init__(self, config): + Bcfg2.Client.Tools.PkgTool.__init__(self, config) # noaskfile needs to live beyond __init__ otherwise file is removed self.noaskfile = tempfile.NamedTemporaryFile() self.noaskname = self.noaskfile.name @@ -52,18 +52,17 @@ self.origpkgtool = self.pkgtool def pkgmogrify(self, packages): - """ Take a list of pkg objects, check for a 'simplename' attribute. + """ Take a list of pkg objects, check for a 'simplefile' attribute. If present, insert a _sysv_pkg_path attribute to the package and download the datastream format SYSV package to a temporary file. 
""" for pkg in packages: - if pkg.get('simplename'): + if pkg.get('simplefile'): tmpfile = tempfile.NamedTemporaryFile() self.tmpfiles.append(tmpfile) - self.logger.info("Downloading %s%s to %s" % (pkg.get('url'), - pkg.get('simplename'), tmpfile.name)) - urlretrieve("%s/%s" % (pkg.get('url'), pkg.get('simplename')), - tmpfile.name) + self.logger.info("Downloading %s to %s" % (pkg.get('url'), + tmpfile.name)) + urlretrieve(pkg.get('url'), tmpfile.name) pkg.set('_sysv_pkg_path', tmpfile.name) def _get_package_command(self, packages): @@ -82,9 +81,9 @@ self.logger.debug("Calling install command: %s" % pkgcmd) return pkgcmd - def Install(self, packages, states): + def Install(self, packages): self.pkgmogrify(packages) - super(SYSV, self).Install(packages, states) + super(SYSV, self).Install(packages) def RefreshPackages(self): """Refresh memory hashes of packages.""" @@ -120,7 +119,7 @@ self.logger.debug("Package %s not installed" % entry.get("name")) else: - if self.setup['quick'] or \ + if Bcfg2.Options.setup.quick or \ entry.attrib.get('verify', 'true') == 'false': return True rv = self.cmd.run("/usr/sbin/pkgchk -n %s" % entry.get('name')) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/VCS.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/VCS.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/VCS.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/VCS.py 2017-01-10 19:18:17.000000000 +0000 @@ -88,8 +88,10 @@ return False try: - client, path = dulwich.client.get_transport_and_path(entry.get('sourceurl')) - remote_refs = client.fetch_pack(path, (lambda x: None), None, None, None) + client, path = dulwich.client.get_transport_and_path( + entry.get('sourceurl')) + remote_refs = client.fetch_pack(path, + (lambda x: None), None, None, None) if expected_rev in remote_refs: expected_rev = remote_refs[expected_rev] except: @@ -119,10 +121,12 @@ dulwich.file.ensure_dir_exists(destname) destr = dulwich.repo.Repo.init(destname) - cl, host_path = dulwich.client.get_transport_and_path(entry.get('sourceurl')) + determine_wants = destr.object_store.determine_wants_all + cl, host_path = dulwich.client.get_transport_and_path( + entry.get('sourceurl')) remote_refs = cl.fetch(host_path, destr, - determine_wants=destr.object_store.determine_wants_all, + determine_wants=determine_wants, progress=sys.stdout.write) if entry.get('revision') in remote_refs: diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/YUM24.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/YUM24.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/YUM24.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/YUM24.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,399 +0,0 @@ -"""This provides bcfg2 support for yum.""" - -import copy -import os.path -import sys -import yum -import Bcfg2.Client.XML -from Bcfg2.Client.Tools.RPM import RPM - - -def build_yname(pkgname, inst): - """Build yum appropriate package name.""" - ypname = pkgname - if inst.get('version') != 'any': - ypname += '-' - if inst.get('epoch', False): - ypname += "%s:" % inst.get('epoch') - if inst.get('version', False) and inst.get('version') != 'any': - ypname += "%s" % (inst.get('version')) - if inst.get('release', False) and inst.get('release') != 'any': - ypname += "-%s" % (inst.get('release')) - if inst.get('arch', False) and inst.get('arch') != 'any': - ypname += ".%s" % (inst.get('arch')) - return ypname - - -class YUM24(RPM): - """Support for Yum 
packages.""" - pkgtype = 'yum' - deprecated = True - __execs__ = ['/usr/bin/yum', '/var/lib/rpm'] - __handles__ = [('Package', 'yum'), - ('Package', 'rpm'), - ('Path', 'ignore')] - - __req__ = {'Package': ['name', 'version']} - __ireq__ = {'Package': ['name']} - #__ireq__ = {'Package': ['name', 'version']} - - __new_req__ = {'Package': ['name'], - 'Instance': ['version', 'release', 'arch']} - __new_ireq__ = {'Package': ['name'], \ - 'Instance': []} - #__new_ireq__ = {'Package': ['name', 'uri'], \ - # 'Instance': ['simplefile', 'version', 'release', 'arch']} - - __gpg_req__ = {'Package': ['name', 'version']} - __gpg_ireq__ = {'Package': ['name', 'version']} - - __new_gpg_req__ = {'Package': ['name'], - 'Instance': ['version', 'release']} - __new_gpg_ireq__ = {'Package': ['name'], - 'Instance': ['version', 'release']} - - def __init__(self, logger, setup, config): - RPM.__init__(self, logger, setup, config) - self.__important__ = self.__important__ + \ - [entry.get('name') for struct in config \ - for entry in struct \ - if entry.tag in ['Path', 'ConfigFile'] and \ - (entry.get('name').startswith('/etc/yum.d') \ - or entry.get('name').startswith('/etc/yum.repos.d')) \ - or entry.get('name') == '/etc/yum.conf'] - self.autodep = setup.get("yum24_autodep") - self.yum_avail = dict() - self.yum_installed = dict() - self.yb = yum.YumBase() - self.yb.doConfigSetup() - self.yb.doTsSetup() - self.yb.doRpmDBSetup() - yup = self.yb.doPackageLists(pkgnarrow='updates') - if hasattr(self.yb.rpmdb, 'pkglist'): - yinst = self.yb.rpmdb.pkglist - else: - yinst = self.yb.rpmdb.getPkgList() - for dest, source in [(self.yum_avail, yup.updates), - (self.yum_installed, yinst)]: - for pkg in source: - if dest is self.yum_avail: - pname = pkg.name - data = {pkg.arch: (pkg.epoch, pkg.version, pkg.release)} - else: - pname = pkg[0] - if pkg[1] is None: - a = 'noarch' - else: - a = pkg[1] - if pkg[2] is None: - e = '0' - else: - e = pkg[2] - data = {a: (e, pkg[3], pkg[4])} - if pname in dest: - dest[pname].update(data) - else: - dest[pname] = dict(data) - - def VerifyPackage(self, entry, modlist): - pinned_version = None - if entry.get('version', False) == 'auto': - # old style entry; synthesize Instances from current installed - if entry.get('name') not in self.yum_installed and \ - entry.get('name') not in self.yum_avail: - # new entry; fall back to default - entry.set('version', 'any') - else: - data = copy.copy(self.yum_installed[entry.get('name')]) - if entry.get('name') in self.yum_avail: - # installed but out of date - data.update(self.yum_avail[entry.get('name')]) - for (arch, (epoch, vers, rel)) in list(data.items()): - x = Bcfg2.Client.XML.SubElement(entry, "Instance", - name=entry.get('name'), - version=vers, arch=arch, - release=rel, epoch=epoch) - if 'verify_flags' in entry.attrib: - x.set('verify_flags', entry.get('verify_flags')) - if 'verify' in entry.attrib: - x.set('verify', entry.get('verify')) - - if entry.get('type', False) == 'yum': - # Check for virtual provides or packages. If we don't have - # this package use Yum to resolve it to a real package name - knownPkgs = list(self.yum_installed.keys()) + list(self.yum_avail.keys()) - if entry.get('name') not in knownPkgs: - # If the package name matches something installed - # or available the that's the correct package. 
- try: - pkgDict = dict([(i.name, i) for i in \ - self.yb.returnPackagesByDep(entry.get('name'))]) - except yum.Errors.YumBaseError: - e = sys.exc_info()[1] - self.logger.error('Yum Error Depsolving for %s: %s' % \ - (entry.get('name'), str(e))) - pkgDict = {} - - if len(pkgDict) > 1: - # What do we do with multiple packages? - s = "YUM24: returnPackagesByDep(%s) returned many packages" - self.logger.info(s % entry.get('name')) - s = "YUM24: matching packages: %s" - self.logger.info(s % str(list(pkgDict.keys()))) - pkgs = set(pkgDict.keys()) & set(self.yum_installed.keys()) - if len(pkgs) > 0: - # Virtual packages matches an installed real package - pkg = pkgDict[pkgs.pop()] - s = "YUM24: chosing: %s" % pkg.name - self.logger.info(s) - else: - # What's the right package? This will fail verify - # and Yum should Do The Right Thing on package install - pkg = None - elif len(pkgDict) == 1: - pkg = list(pkgDict.values())[0] - else: # len(pkgDict) == 0 - s = "YUM24: returnPackagesByDep(%s) returned no results" - self.logger.info(s % entry.get('name')) - pkg = None - - if pkg is not None: - s = "YUM24: remapping virtual package %s to %s" - self.logger.info(s % (entry.get('name'), pkg.name)) - entry.set('name', pkg.name) - - return RPM.VerifyPackage(self, entry, modlist) - - def Install(self, packages, states): - """ - Try and fix everything that YUM24.VerifyPackages() found wrong for - each Package Entry. This can result in individual RPMs being - installed (for the first time), deleted, downgraded - or upgraded. - - NOTE: YUM can not reinstall a package that it thinks is already - installed. - - packages is a list of Package Elements that has - states[] == False - - The following effects occur: - - states{} is conditionally updated for each package. - - self.installed{} is rebuilt, possibly multiple times. - - self.instance_status{} is conditionally updated for each instance - of a package. - - Each package will be added to self.modified[] if its states{} - entry is set to True. - - """ - self.logger.info('Running YUM24.Install()') - - install_pkgs = [] - gpg_keys = [] - upgrade_pkgs = [] - - # Remove extra instances. - # Can not reverify because we don't have a package entry. - if len(self.extra_instances) > 0: - if (self.setup.get('remove') == 'all' or \ - self.setup.get('remove') == 'packages'): - self.Remove(self.extra_instances) - else: - self.logger.info("The following extra package instances will be removed by the '-r' option:") - for pkg in self.extra_instances: - for inst in pkg: - self.logger.info(" %s %s" % \ - ((pkg.get('name'), self.str_evra(inst)))) - - # Figure out which instances of the packages actually need something - # doing to them and place in the appropriate work 'queue'. - for pkg in packages: - insts = [pinst for pinst in pkg \ - if pinst.tag in ['Instance', 'Package']] - if insts: - for inst in insts: - if self.FixInstance(inst, self.instance_status[inst]): - if self.instance_status[inst].get('installed', False) \ - == False: - if pkg.get('name') == 'gpg-pubkey': - gpg_keys.append(inst) - else: - install_pkgs.append(inst) - elif self.instance_status[inst].get('version_fail', \ - False) == True: - upgrade_pkgs.append(inst) - else: - install_pkgs.append(pkg) - - # Install GPG keys. - # Alternatively specify the required keys using 'gpgkey' in the - # repository definition in yum.conf. YUM will install the keys - # automatically. 
- if len(gpg_keys) > 0: - for inst in gpg_keys: - self.logger.info("Installing GPG keys.") - if inst.get('simplefile') is None: - self.logger.error("GPG key has no simplefile attribute") - continue - key_arg = os.path.join(self.instance_status[inst].get('pkg').get('uri'), \ - inst.get('simplefile')) - if self.cmd.run("rpm --import %s" % key_arg).success: - self.logger.debug("Unable to install %s-%s" % \ - (self.instance_status[inst].get('pkg').get('name'), \ - self.str_evra(inst))) - else: - self.logger.debug("Installed %s-%s-%s" % \ - (self.instance_status[inst].get('pkg').get('name'), \ - inst.get('version'), inst.get('release'))) - self.RefreshPackages() - self.gpg_keyids = self.getinstalledgpg() - pkg = self.instance_status[gpg_keys[0]].get('pkg') - states[pkg] = self.VerifyPackage(pkg, []) - - # Install packages. - if len(install_pkgs) > 0: - self.logger.info("Attempting to install packages") - - if self.autodep: - pkgtool = "/usr/bin/yum -d0 -y install %s" - else: - pkgtool = "/usr/bin/yum -d0 install %s" - - install_args = [] - for inst in install_pkgs: - pkg_arg = self.instance_status[inst].get('pkg').get('name') - install_args.append(build_yname(pkg_arg, inst)) - - if self.cmd.run(pkgtool % " ".join(install_args)).success: - # The yum command succeeded. All packages installed. - self.logger.info("Single Pass for Install Succeeded") - self.RefreshPackages() - else: - # The yum command failed. No packages installed. - # Try installing instances individually. - self.logger.error("Single Pass Install of Packages Failed") - installed_instances = [] - for inst in install_pkgs: - pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst) - - if self.cmd.run(pkgtool % pkg_arg).success: - installed_instances.append(inst) - else: - self.logger.debug("%s %s would not install." % - (self.instance_status[inst].get('pkg').get('name'), - self.str_evra(inst))) - self.RefreshPackages() - - # Fix upgradeable packages. - if len(upgrade_pkgs) > 0: - self.logger.info("Attempting to upgrade packages") - - if self.autodep: - pkgtool = "/usr/bin/yum -d0 -y update %s" - else: - pkgtool = "/usr/bin/yum -d0 update %s" - - upgrade_args = [] - for inst in upgrade_pkgs: - pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst) - upgrade_args.append(pkg_arg) - - if self.cmd.run(pkgtool % " ".join(upgrade_args)).success: - # The yum command succeeded. All packages installed. - self.logger.info("Single Pass for Install Succeeded") - self.RefreshPackages() - else: - # The yum command failed. No packages installed. - # Try installing instances individually. - self.logger.error("Single Pass Install of Packages Failed") - installed_instances = [] - for inst in upgrade_pkgs: - pkg_arg = build_yname(self.instance_status[inst].get('pkg').get('name'), inst) - if self.cmd.run(pkgtool % pkg_arg).success: - installed_instances.append(inst) - else: - self.logger.debug("%s %s would not install." % \ - (self.instance_status[inst].get('pkg').get('name'), \ - self.str_evra(inst))) - - self.RefreshPackages() - - if not self.setup['kevlar']: - for pkg_entry in [p for p in packages if self.canVerify(p)]: - self.logger.debug("Reverifying Failed Package %s" % (pkg_entry.get('name'))) - states[pkg_entry] = self.VerifyPackage(pkg_entry, \ - self.modlists.get(pkg_entry, [])) - - for entry in [ent for ent in packages if states[ent]]: - self.modified.append(entry) - - def Remove(self, packages): - """ - Remove specified entries. 
- - packages is a list of Package Entries with Instances generated - by FindExtra(). - """ - self.logger.debug('Running YUM24.Remove()') - - if self.autodep: - pkgtool = "/usr/bin/yum -d0 -y erase %s" - else: - pkgtool = "/usr/bin/yum -d0 erase %s" - - erase_args = [] - for pkg in packages: - for inst in pkg: - if pkg.get('name') != 'gpg-pubkey': - pkg_arg = pkg.get('name') + '-' - if inst.get('epoch', False): - pkg_arg = pkg_arg + inst.get('epoch') + ':' - pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release') - if inst.get('arch', False): - pkg_arg = pkg_arg + '.' + inst.get('arch') - erase_args.append(pkg_arg) - else: - pkgspec = {'name': pkg.get('name'), - 'version': inst.get('version'), - 'release': inst.get('release')} - self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ - % (pkgspec.get('name'), self.str_evra(pkgspec))) - self.logger.info(" This package will be deleted in a future version of the YUM24 driver.") - - rv = self.cmd.run(pkgtool % " ".join(erase_args)) - if rv.success: - self.modified += packages - for pkg in erase_args: - self.logger.info("Deleted %s" % (pkg)) - else: - self.logger.info("Bulk erase failed with errors:") - self.logger.debug("Erase results: %s" % rv.error) - self.logger.info("Attempting individual erase for each package.") - for pkg in packages: - pkg_modified = False - for inst in pkg: - if pkg.get('name') != 'gpg-pubkey': - pkg_arg = pkg.get('name') + '-' - if 'epoch' in inst.attrib: - pkg_arg = pkg_arg + inst.get('epoch') + ':' - pkg_arg = pkg_arg + inst.get('version') + '-' + inst.get('release') - if 'arch' in inst.attrib: - pkg_arg = pkg_arg + '.' + inst.get('arch') - else: - self.logger.info("WARNING: gpg-pubkey package not in configuration %s %s"\ - % (pkg.get('name'), self.str_evra(pkg))) - self.logger.info(" This package will be deleted in a future version of the YUM24 driver.") - continue - - rv = self.cmd.run(self.pkgtool % pkg_arg) - if rv.success: - pkg_modified = True - self.logger.info("Deleted %s" % pkg_arg) - else: - self.logger.error("Unable to delete %s" % pkg_arg) - self.logger.debug("Failure: %s" % rv.error) - if pkg_modified == True: - self.modified.append(pkg) - - self.RefreshPackages() - self.extra = self.FindExtra() diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/YUMng.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/YUMng.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/YUMng.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/YUMng.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -""" YUM driver called 'YUMng' for backwards compat """ - -from Bcfg2.Client.Tools.YUM import YUM - - -class YUMng(YUM): - """ YUM driver called 'YUMng' for backwards compat """ - deprecated = True - conflicts = ['YUM24', 'RPM', 'RPMng'] diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/YUM.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/YUM.py --- bcfg2-1.3.5/src/lib/Bcfg2/Client/Tools/YUM.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Client/Tools/YUM.py 2017-01-10 19:18:17.000000000 +0000 @@ -3,6 +3,7 @@ import copy import os.path import sys +import logging import yum import yum.packages import yum.rpmtrans @@ -10,8 +11,10 @@ import yum.Errors import yum.misc import rpmUtils.arch +import rpmUtils.miscutils import Bcfg2.Client.XML import Bcfg2.Client.Tools +import Bcfg2.Options def build_yname(pkgname, inst): @@ -65,13 +68,13 @@ """We subclass the default RPM transaction 
callback so that we can control Yum's verbosity and pipe it through the right logger.""" - def __init__(self, logger): + def __init__(self): yum.rpmtrans.RPMBaseCallback.__init__(self) # we want to log events to *both* the Bcfg2 logger (which goes # to stderr or syslog or wherever the user wants it to go) # *and* the yum file logger, which will go to yum.log (ticket # #1103) - self.bcfg2_logger = logger + self.bcfg2_logger = logging.getLogger(self.__class__.__name__) self.state = None self.package = None @@ -110,13 +113,50 @@ """Class to handle display of what step we are in the Yum transaction such as downloading packages, etc.""" - def __init__(self, logger): + def __init__(self): yum.callbacks.ProcessTransBaseCallback.__init__(self) - self.logger = logger + self.logger = logging.getLogger(self.__class__.__name__) class YUM(Bcfg2.Client.Tools.PkgTool): """Support for Yum packages.""" + + options = Bcfg2.Client.Tools.PkgTool.options + [ + Bcfg2.Options.BooleanOption( + cf=('YUM', 'pkg_checks'), default=True, dest="yum_pkg_checks", + help="Perform YUM package checks"), + Bcfg2.Options.BooleanOption( + cf=('YUM', 'pkg_verify'), default=True, dest="yum_pkg_verify", + help="Perform YUM package verify"), + Bcfg2.Options.BooleanOption( + cf=('YUM', 'install_missing'), default=True, + dest="yum_install_missing", + help="Install missing packages"), + Bcfg2.Options.Option( + cf=('YUM', 'erase_flags'), default=["allmatches"], + dest="yum_erase_flags", type=Bcfg2.Options.Types.comma_list, + help="YUM erase flags"), + Bcfg2.Options.BooleanOption( + cf=('YUM', 'fix_version'), default=True, + dest="yum_fix_version", + help="Fix (upgrade or downgrade) packages with the wrong version"), + Bcfg2.Options.BooleanOption( + cf=('YUM', 'reinstall_broken'), default=True, + dest="yum_reinstall_broken", + help="Reinstall packages that fail to verify"), + Bcfg2.Options.Option( + cf=('YUM', 'verify_flags'), default=[], + dest="yum_verify_flags", type=Bcfg2.Options.Types.comma_list, + help="YUM verify flags"), + Bcfg2.Options.Option( + cf=('YUM', 'disabled_plugins'), default=[], + type=Bcfg2.Options.Types.comma_list, dest="yum_disabled_plugins", + help="YUM disabled plugins"), + Bcfg2.Options.Option( + cf=('YUM', 'enabled_plugins'), default=[], + type=Bcfg2.Options.Types.comma_list, dest="yum_enabled_plugins", + help="YUM enabled plugins")] + pkgtype = 'yum' __execs__ = [] __handles__ = [('Package', 'yum'), @@ -126,11 +166,11 @@ __req__ = {'Package': ['type'], 'Path': ['type']} - conflicts = ['YUM24', 'RPM', 'RPMng', 'YUMng'] + conflicts = ['RPM'] - def __init__(self, logger, setup, config): - self.yumbase = self._loadYumBase(setup=setup, logger=logger) - Bcfg2.Client.Tools.PkgTool.__init__(self, logger, setup, config) + def __init__(self, config): + self.yumbase = self._loadYumBase() + Bcfg2.Client.Tools.PkgTool.__init__(self, config) self.ignores = [] for struct in config: self.ignores.extend([entry.get('name') @@ -171,51 +211,57 @@ else: dest[pname] = dict(data) - # Process the Yum section from the config file. 
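Because the section and option names are easy to mix up, here is how the cf=('YUM', ...) declarations above map onto a [YUM] section of bcfg2.conf. The values shown are simply the declared defaults, and the snippet uses configparser purely to keep the example self-contained; it is not how Bcfg2.Options actually parses the file:

import configparser

SAMPLE = """
[YUM]
pkg_checks = true
pkg_verify = true
install_missing = true
erase_flags = allmatches
fix_version = true
reinstall_broken = true
verify_flags =
disabled_plugins =
enabled_plugins =
"""

cfg = configparser.ConfigParser()
cfg.read_string(SAMPLE)
assert cfg.getboolean('YUM', 'pkg_checks') is True
assert cfg.get('YUM', 'erase_flags').split(',') == ['allmatches']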
These are all - # boolean flags, either we do stuff or we don't - self.pkg_checks = self.setup["yum_pkg_checks"] - self.pkg_verify = self.setup["yum_pkg_verify"] - self.do_install = self.setup["yum_installed_action"] == "install" - self.do_upgrade = self.setup["yum_version_fail_action"] == "upgrade" - self.do_reinst = self.setup["yum_verify_fail_action"] == "reinstall" - self.verify_flags = self.setup["yum_verify_flags"] - self.installonlypkgs = self.yumbase.conf.installonlypkgs if 'gpg-pubkey' not in self.installonlypkgs: self.installonlypkgs.append('gpg-pubkey') - self.logger.debug("Yum: Install missing: %s" % self.do_install) - self.logger.debug("Yum: pkg_checks: %s" % self.pkg_checks) - self.logger.debug("Yum: pkg_verify: %s" % self.pkg_verify) - self.logger.debug("Yum: Upgrade on version fail: %s" % self.do_upgrade) - self.logger.debug("Yum: Reinstall on verify fail: %s" % self.do_reinst) + self.logger.debug("Yum: Install missing: %s" % + Bcfg2.Options.setup.yum_install_missing) + self.logger.debug("Yum: pkg_checks: %s" % + Bcfg2.Options.setup.yum_pkg_checks) + self.logger.debug("Yum: pkg_verify: %s" % + Bcfg2.Options.setup.yum_pkg_verify) + self.logger.debug("Yum: Upgrade on version fail: %s" % + Bcfg2.Options.setup.yum_fix_version) + self.logger.debug("Yum: Reinstall on verify fail: %s" % + Bcfg2.Options.setup.yum_reinstall_broken) self.logger.debug("Yum: installonlypkgs: %s" % self.installonlypkgs) - self.logger.debug("Yum: verify_flags: %s" % self.verify_flags) + self.logger.debug("Yum: verify_flags: %s" % + Bcfg2.Options.setup.yum_verify_flags) + self.logger.debug("Yum: disabled_plugins: %s" % + Bcfg2.Options.setup.yum_disabled_plugins) + self.logger.debug("Yum: enabled_plugins: %s" % + Bcfg2.Options.setup.yum_enabled_plugins) - def _loadYumBase(self, setup=None, logger=None): + def _loadYumBase(self): ''' this may be called before PkgTool.__init__() is called on this object (when the YUM object is first instantiated; PkgTool.__init__() calls RefreshPackages(), which requires a YumBase object already exist), or after __init__() has completed, when we reload the yum config before installing - packages. Consequently, we support both methods by allowing - setup and logger, the only object properties we use in this - function, to be passed as keyword arguments or to be omitted - and drawn from the object itself.''' + packages. 
''' rv = yum.YumBase() # pylint: disable=C0103 - if setup is None: - setup = self.setup - if logger is None: + if hasattr(self, "logger"): logger = self.logger + else: + logger = logging.getLogger(self.name) - if setup['debug']: + if Bcfg2.Options.setup.debug: debuglevel = 3 - elif setup['verbose']: + elif Bcfg2.Options.setup.verbose: debuglevel = 2 else: debuglevel = 0 + if len(Bcfg2.Options.setup.yum_disabled_plugins) > 0: + rv.preconf.disabled_plugins = \ + Bcfg2.Options.setup.yum_disabled_plugins + + if len(Bcfg2.Options.setup.yum_enabled_plugins) > 0: + rv.preconf.enabled_plugins = \ + Bcfg2.Options.setup.yum_enabled_plugins + # pylint: disable=E1121,W0212 try: rv.preconf.debuglevel = debuglevel @@ -242,7 +288,7 @@ to the newest available """ # old style entry; synthesize Instances from current installed if (entry.get('name') not in self.yum_installed and - entry.get('name') not in self.yum_avail): + entry.get('name') not in self.yum_avail): # new entry; fall back to default entry.set('version', 'any') else: @@ -296,7 +342,7 @@ missing = Bcfg2.Client.Tools.PkgTool.missing_attrs(self, entry) if (entry.get('name', None) is None and - entry.get('group', None) is None): + entry.get('group', None) is None): missing += ['name', 'group'] return missing @@ -311,7 +357,7 @@ using. Disabling file checksums is a new feature yum 3.2.17-ish """ try: - return pkg.verify(fast=self.setup.get('quick', False)) + return pkg.verify(fast=Bcfg2.Options.setup.quick) except TypeError: # Older Yum API return pkg.verify() @@ -436,9 +482,9 @@ package_fail = False qtext_versions = [] virt_pkg = False - pkg_checks = (self.pkg_checks and + pkg_checks = (Bcfg2.Options.setup.yum_pkg_checks and entry.get('pkg_checks', 'true').lower() == 'true') - pkg_verify = (self.pkg_verify and + pkg_verify = (Bcfg2.Options.setup.yum_pkg_verify and entry.get('pkg_verify', 'true').lower() == 'true') yum_group = False @@ -531,7 +577,7 @@ inst.get('verify_flags').lower().replace(' ', ',').split(',') else: - verify_flags = self.verify_flags + verify_flags = Bcfg2.Options.setup.yum_verify_flags if 'arch' in nevra: # If arch is specified use it to select the package @@ -580,8 +626,8 @@ "an RPM release") continue pkg_objs = [p for p in all_pkg_objs - if (p.version == nevra['version'] - and p.release == nevra['release'])] + if (p.version == nevra['version'] and + p.release == nevra['release'])] else: pkg_objs = self.yumbase.rpmdb.searchNevra(**short_yname(nevra)) if len(pkg_objs) == 0: @@ -615,7 +661,7 @@ nevra.get('release', 'any')) entry.set('current_version', "%s:%s-%s" % current_evr) entry.set('version', "%s:%s-%s" % wanted_evr) - if yum.compareEVR(current_evr, wanted_evr) == 1: + if rpmUtils.miscutils.compareEVR(current_evr, wanted_evr) == 1: entry.set("package_fail_action", "downgrade") else: entry.set("package_fail_action", "update") @@ -623,7 +669,7 @@ qtext_versions.append("U(%s)" % str(all_pkg_objs[0])) continue - if self.setup.get('quick', False): + if Bcfg2.Options.setup.quick: # Passed -q on the command line continue if not (pkg_verify and @@ -697,7 +743,7 @@ install_only = False if virt_pkg or \ - (install_only and not self.setup['kevlar']) or \ + (install_only and not Bcfg2.Options.setup.kevlar) or \ yum_group: # virtual capability supplied, we are probably dealing # with multiple packages of different names. 
This check @@ -820,8 +866,8 @@ self.yumbase.closeRpmDB() self.RefreshPackages() - rpm_display = RPMDisplay(self.logger) - yum_display = YumDisplay(self.logger) + rpm_display = RPMDisplay() + yum_display = YumDisplay() # Run the Yum Transaction try: rescode, restring = self.yumbase.buildTransaction() @@ -870,7 +916,7 @@ cleanup() - def Install(self, packages, states): # pylint: disable=R0912,R0914,R0915 + def Install(self, packages): # pylint: disable=R0912,R0914,R0915 """ Try and fix everything that Yum.VerifyPackages() found wrong for each Package Entry. This can result in individual RPMs being installed (for the first time), deleted, downgraded @@ -888,6 +934,7 @@ entry is set to True. """ self.logger.debug('Running Yum.Install()') + states = dict() install_pkgs = [] gpg_keys = [] upgrade_pkgs = [] @@ -905,8 +952,7 @@ # Remove extra instances. # Can not reverify because we don't have a package entry. if self.extra_instances is not None and len(self.extra_instances) > 0: - if (self.setup.get('remove') == 'all' or - self.setup.get('remove') == 'packages'): + if Bcfg2.Options.setup.remove in ['all', 'packages']: self.Remove(self.extra_instances) else: self.logger.info("The following extra package instances will " @@ -931,14 +977,17 @@ nevra2string(build_yname(pkg.get('name'), inst))) continue status = self.instance_status[inst] - if not status.get('installed', False) and self.do_install: + if not status.get('installed', False) and \ + Bcfg2.Options.setup.yum_install_missing: queue_pkg(pkg, inst, install_pkgs) - elif status.get('version_fail', False) and self.do_upgrade: + elif (status.get('version_fail', False) and + Bcfg2.Options.setup.yum_fix_version): if pkg.get("package_fail_action") == "downgrade": queue_pkg(pkg, inst, downgrade_pkgs) else: queue_pkg(pkg, inst, upgrade_pkgs) - elif status.get('verify_fail', False) and self.do_reinst: + elif (status.get('verify_fail', False) and + Bcfg2.Options.setup.yum_reinstall_broken): queue_pkg(pkg, inst, reinstall_pkgs) else: # Either there was no Install/Version/Verify @@ -1027,7 +1076,7 @@ self._runYumTransaction() - if not self.setup['kevlar']: + if not Bcfg2.Options.setup.kevlar: for pkg_entry in [p for p in packages if self.canVerify(p)]: self.logger.debug("Reverifying Failed Package %s" % pkg_entry.get('name')) @@ -1035,8 +1084,8 @@ self.VerifyPackage(pkg_entry, self.modlists.get(pkg_entry, [])) - for entry in [ent for ent in packages if states[ent]]: - self.modified.append(entry) + self.modified.extend(ent for ent in packages if states[ent]) + return states def Remove(self, packages): """ diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Compat.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Compat.py --- bcfg2-1.3.5/src/lib/Bcfg2/Compat.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Compat.py 2017-01-10 19:18:17.000000000 +0000 @@ -286,3 +286,9 @@ def cmp(a, b): """ Py3k implementation of cmp() """ return (a > b) - (a < b) + +# ast was introduced in python 2.6 +try: + from ast import literal_eval +except ImportError: + literal_eval = eval diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/DBSettings.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/DBSettings.py --- bcfg2-1.3.5/src/lib/Bcfg2/DBSettings.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/DBSettings.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,360 @@ +""" Django settings for the Bcfg2 server """ + +import os +import sys +import logging +import Bcfg2.Logger +import Bcfg2.Options + +try: + import 
django + import django.core.management + import django.conf + HAS_DJANGO = True +except ImportError: + HAS_DJANGO = False + +# required for reporting +try: + import south # pylint: disable=W0611 + HAS_SOUTH = True +except ImportError: + HAS_SOUTH = False + +settings = dict( # pylint: disable=C0103 + TIME_ZONE=None, + TEMPLATE_DEBUG=False, + DEBUG=False, + ALLOWED_HOSTS=['*'], + MEDIA_URL='/site_media/', + MANAGERS=(('Root', 'root'),), + ADMINS=(('Root', 'root'),), + # Language code for this installation. All choices can be found + # here: + # http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes + # http://blogs.law.harvard.edu/tech/stories/storyReader$15 + LANGUAGE_CODE='en-us', + SITE_ID=1, + INSTALLED_APPS=('django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.admin', + 'Bcfg2.Server'), + MEDIA_ROOT='', + STATIC_URL='/media/', + # TODO - make this unique + SECRET_KEY='eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7', + TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader', + 'django.template.loaders.app_directories.Loader'), + MIDDLEWARE_CLASSES=( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware'), + ROOT_URLCONF='Bcfg2.Reporting.urls', + AUTHENTICATION_BACKENDS=('django.contrib.auth.backends.ModelBackend'), + LOGIN_URL='/login', + SESSION_EXPIRE_AT_BROWSER_CLOSE=True, + TEMPLATE_DIRS=( + '/usr/share/python-support/python-django/django/contrib/admin/' + 'templates/'), + TEMPLATE_CONTEXT_PROCESSORS=( + 'django.contrib.auth.context_processors.auth', + 'django.core.context_processors.debug', + 'django.core.context_processors.i18n', + 'django.core.context_processors.media', + 'django.core.context_processors.request'), + DATABASE_ROUTERS=['Bcfg2.DBSettings.PerApplicationRouter'], + TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner', + CACHES={ + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + } + }) + +if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] >= 6: + settings['MIDDLEWARE_CLASSES'] += \ + ('django.contrib.admindocs.middleware.XViewMiddleware',) +elif HAS_SOUTH: + settings['MIDDLEWARE_CLASSES'] += \ + ('django.middleware.doc.XViewMiddleware',) + +if HAS_DJANGO and django.VERSION[0] == 1 and django.VERSION[1] >= 7: + settings['INSTALLED_APPS'] += ('Bcfg2.Reporting',) +elif HAS_SOUTH: + settings['INSTALLED_APPS'] += ('south', 'Bcfg2.Reporting') + settings['SOUTH_MIGRATION_MODULES'] = { + 'Reporting': 'Bcfg2.Reporting.south_migrations', + 'Server': 'Bcfg2.Server.south_migrations', + } +if 'BCFG2_LEGACY_MODELS' in os.environ: + settings['INSTALLED_APPS'] += ('Bcfg2.Server.Reports.reports',) + + +def finalize_django_config(opts=None, silent=False): + """ Perform final Django configuration """ + if opts is None: + opts = Bcfg2.Options.setup + settings['DATABASES'] = dict( + default=dict( + ENGINE="django.db.backends.%s" % opts.db_engine, + NAME=opts.db_name, + USER=opts.db_user, + PASSWORD=opts.db_password, + HOST=opts.db_host, + PORT=opts.db_port, + OPTIONS=opts.db_opts, + SCHEMA=opts.db_schema)) + + if hasattr(opts, "reporting_db_engine") and \ + opts.reporting_db_engine is not None: + settings['DATABASES']['Reporting'] = dict( + ENGINE="django.db.backends.%s" % opts.reporting_db_engine, + NAME=opts.reporting_db_name, + USER=opts.reporting_db_user, + PASSWORD=opts.reporting_db_password, + HOST=opts.reporting_db_host, + 
PORT=opts.reporting_db_port, + OPTIONS=opts.reporting_db_opts, + SCHEMA=opts.reporting_db_schema) + + settings['TIME_ZONE'] = opts.time_zone + + settings['TEMPLATE_DEBUG'] = settings['DEBUG'] = \ + opts.web_debug + if opts.web_debug: + print("Warning: Setting web_debug to True causes extraordinary " + "memory leaks. Only use this setting if you know what " + "you're doing.") + + if opts.web_prefix: + settings['MEDIA_URL'] = \ + opts.web_prefix.rstrip('/') + \ + settings['MEDIA_URL'] + + if opts.django_settings: + settings.update(opts.django_settings) + + logger = logging.getLogger() + + logger.debug("Finalizing Django settings: %s" % settings) + module = sys.modules[__name__] + for name, value in settings.items(): + setattr(module, name, value) + try: + django.conf.settings.configure(**settings) + if django.VERSION[0] == 1 and django.VERSION[1] >= 7: + django.setup() # pylint: disable=E1101 + except RuntimeError: + if not silent: + logger.warning("Failed to finalize Django settings: %s" % + sys.exc_info()[1]) + + +def sync_databases(**kwargs): + """ Synchronize all databases that we know about. """ + if django.VERSION[0] == 1 and django.VERSION[1] >= 7: + # Nothing needed here, it's all handled with migrate + return + + logger = logging.getLogger() + for database in settings['DATABASES']: + logger.debug("Syncing database %s" % (database)) + django.core.management.call_command("syncdb", database=database, + **kwargs) + + +def upgrade_to_django_migrations(database, logger): + """ + Get the migration state from south and move django migrations to + the same state by fake applying the same migration. + + Note: We cannot use south directly here, because this functions + runs on django-1.7 or higher, that is not supported by south. + """ + + last_migration = None + try: + # get latest south migration + cursor = django.db.connections[database].cursor() + cursor.cursor.execute('SELECT migration FROM south_migrationhistory') + applied_migrations = [name for (name,) in cursor.fetchall()] + last_migration = sorted(applied_migrations).pop() + except: # pylint: disable=W0702 + # django.db.DatabaseError is not working here, because we are + # using the low level api to interact directly with the database + logger.debug("No south migration detected for database: %s." % + database) + + if last_migration is not None: + # fake-apply matching django migrations + django.core.management.call_command( + "migrate", 'Reporting', last_migration, + database=database, fake=True) + + +def initial_django_migration(database): + """ Check if we ever executed an initial django migration. """ + from django.db.migrations import loader # pylint: disable=E0611 + loader = loader.MigrationLoader(django.db.connections[database]) + return len(loader.applied_migrations) == 0 + + +def migrate_databases(**kwargs): + """ Do South migrations on all databases that we know about. """ + logger = logging.getLogger() + for database in settings['DATABASES']: + logger.debug("Migrating database %s" % (database)) + if django.VERSION[0] == 1 and django.VERSION[1] >= 7: + if initial_django_migration(database): + logger.warning( + "No applied django migrations found for database %s. " + "Trying to get the state from south migration in case " + "you just upgraded your django version." % database) + upgrade_to_django_migrations(database, logger) + + django.core.management.call_command("migrate", database=database, + **kwargs) + + +def get_db_label(application): + """ Get the name of the database for a given Django "application". 
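# Illustration only (engine, name, and credential values below are invented):
# with the reporting_* options set, finalize_django_config() above ends up
# with two database aliases, and the PerApplicationRouter defined below sends
# the Bcfg2.Reporting models to the alias that matches the application label.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/var/lib/bcfg2/bcfg2.sqlite',
    },
    'Reporting': {  # matches the app label, so get_db_label() selects it
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'bcfg2_reporting',
        'USER': 'bcfg2',
        'PASSWORD': 'secret',
        'HOST': 'db.example.com',
        'PORT': '5432',
    },
}
DATABASE_ROUTERS = ['Bcfg2.DBSettings.PerApplicationRouter']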
The + rule is that if a database with the same name as the application exists, + use it. Otherwise use the default. Returns a string suitible for use as a + key in the Django database settings dict """ + if application in settings['DATABASES']: + return application + + return 'default' + + +class PerApplicationRouter(object): + """ Django database router for redirecting different applications to their + own database """ + + def _db_per_app(self, model, **_): + """ If a database with the same name as the application exists, use it. + Otherwise use the default """ + return get_db_label(model._meta.app_label) # pylint: disable=W0212 + + def db_for_read(self, model, **hints): + """ Called when Django wants to find out what database to read from """ + return self._db_per_app(model, **hints) + + def db_for_write(self, model, **hints): + """ Called when Django wants to find out what database to write to """ + return self._db_per_app(model, **hints) + + def allow_relation(self, obj1, obj2, **_): + """ Called when Django wants to determine what relations to allow. Only + allow relations within an app """ + # pylint: disable=W0212 + return obj1._meta.app_label == obj2._meta.app_label + # pylint: enable=W0212 + + def allow_syncdb(self, *_): + """ Called when Django wants to determine which models to sync to a + given database. Take the cowards way out and sync all models to all + databases to allow for easy migrations. This method is replaced with + allow_migrate in django 1.7 and higher. """ + return True + + def allow_migrate(self, *_args, **_kwargs): + """ Called when Django wants to determine which migrations should + be run on a given database. Take the cowards way out and run all + migrations to all databases to allow for easy migrations. """ + return True + + +class _OptionContainer(object): + """ Container for options loaded at import-time to configure + databases """ + parse_first = True + options = [ + Bcfg2.Options.Common.repository, + Bcfg2.Options.PathOption( + '-W', '--web-config', cf=('reporting', 'config'), + default="/etc/bcfg2-web.conf", + action=Bcfg2.Options.ConfigFileAction, + help='Web interface configuration file'), + # default database options + Bcfg2.Options.Option( + cf=('database', 'engine'), default='sqlite3', + help='Database engine', dest='db_engine'), + Bcfg2.Options.RepositoryMacroOption( + cf=('database', 'name'), default='/etc/bcfg2.sqlite', + help="Database name", dest="db_name"), + Bcfg2.Options.Option( + cf=('database', 'user'), help='Database username', dest='db_user'), + Bcfg2.Options.Option( + cf=('database', 'password'), help='Database password', + dest='db_password'), + Bcfg2.Options.Option( + cf=('database', 'host'), help='Database host', dest='db_host'), + Bcfg2.Options.Option( + cf=('database', 'port'), help='Database port', dest='db_port'), + Bcfg2.Options.Option( + cf=('database', 'schema'), help='Database schema', + dest='db_schema', default='public'), + Bcfg2.Options.Option( + cf=('database', 'options'), help='Database options', + dest='db_opts', type=Bcfg2.Options.Types.literal_dict, + default=dict()), + # reporting database options + Bcfg2.Options.Option( + cf=('database', 'reporting_engine'), + help='Reporting database engine', dest='reporting_db_engine'), + Bcfg2.Options.Option( + cf=('database', 'reporting_name'), + default='/etc/reporting.sqlite', + help="Reporting database name", dest="reporting_db_name"), + Bcfg2.Options.Option( + cf=('database', 'reporting_user'), + help='Reporting database username', dest='reporting_db_user'), + 
Bcfg2.Options.Option( + cf=('database', 'reporting_password'), + help='Reporting database password', dest='reporting_db_password'), + Bcfg2.Options.Option( + cf=('database', 'reporting_host'), + help='Reporting database host', dest='reporting_db_host'), + Bcfg2.Options.Option( + cf=('database', 'reporting_port'), + help='Reporting database port', dest='reporting_db_port'), + Bcfg2.Options.Option( + cf=('database', 'reporting_schema'), + help='Reporting database schema', dest='reporting_db_schema', + default='public'), + Bcfg2.Options.Option( + cf=('database', 'reporting_options'), + help='Reporting database options', dest='reporting_db_opts', + type=Bcfg2.Options.Types.literal_dict, default=dict()), + # Django options + Bcfg2.Options.Option( + cf=('reporting', 'time_zone'), help='Django timezone'), + Bcfg2.Options.BooleanOption( + cf=('reporting', 'web_debug'), help='Django debug'), + Bcfg2.Options.Option( + cf=('reporting', 'web_prefix'), help='Web prefix'), + Bcfg2.Options.Option( + cf=('reporting', 'django_settings'), + help='Additional django settings', + type=Bcfg2.Options.Types.literal_dict, default=dict())] + + @staticmethod + def component_parsed_hook(opts): + """ Finalize the Django config after this component's options + are parsed. """ + if HAS_DJANGO: + finalize_django_config(opts=opts) + + @staticmethod + def options_parsed_hook(): + """ Finalize the Django config after all options are parsed. + This is added in case the DBSettings component isn't added + early enough in option parsing to be parsed in the 'early' + phase. Chances are good that things will break if that + happens, but we do our best to be a good citizen. """ + if HAS_DJANGO: + finalize_django_config(silent=True) + +Bcfg2.Options.get_parser().add_component(_OptionContainer) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Encryption.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Encryption.py --- bcfg2-1.3.5/src/lib/Bcfg2/Encryption.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Encryption.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -""" Bcfg2.Encryption provides a number of convenience methods for -handling encryption in Bcfg2. See :ref:`server-encryption` for more -details. """ - -import os -import sys -from M2Crypto import Rand -from M2Crypto.EVP import Cipher, EVPError -from Bcfg2.Compat import StringIO, md5, b64encode, b64decode - -#: Constant representing the encryption operation for -#: :class:`M2Crypto.EVP.Cipher`, which uses a simple integer. This -#: makes our code more readable. -ENCRYPT = 1 - -#: Constant representing the decryption operation for -#: :class:`M2Crypto.EVP.Cipher`, which uses a simple integer. This -#: makes our code more readable. -DECRYPT = 0 - -#: Default cipher algorithm. To get a full list of valid algorithms, -#: you can run:: -#: -#: openssl list-cipher-algorithms | grep -v ' => ' | \ -#: tr 'A-Z-' 'a-z_' | sort -u -ALGORITHM = "aes_256_cbc" - -#: Default initialization vector. For best security, you should use a -#: unique IV for each message. :func:`ssl_encrypt` does this in an -#: automated fashion. 
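# Round-trip sketch for the Bcfg2.Encryption helpers removed here (valid
# against 1.3.x, where the module still exists; the passphrase and plaintext
# are placeholders and M2Crypto must be installed).
import Bcfg2.Encryption

token = Bcfg2.Encryption.ssl_encrypt("plaintext secret", "passphrase")
# ssl_encrypt() picks a fresh salt per call and derives the key and IV from
# the passphrase, so the base64 output varies but always decrypts:
assert Bcfg2.Encryption.ssl_decrypt(token, "passphrase") == "plaintext secret"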
-IV = r'\0' * 16 - -#: The config file section encryption options and passphrases are -#: stored in -CFG_SECTION = "encryption" - -#: The config option used to store the algorithm -CFG_ALGORITHM = "algorithm" - -#: The config option used to store the decryption strictness -CFG_DECRYPT = "decrypt" - -Rand.rand_seed(os.urandom(1024)) - - -def _cipher_filter(cipher, instr): - """ M2Crypto reads and writes file-like objects, so this uses - StringIO to pass data through it """ - inbuf = StringIO(instr) - outbuf = StringIO() - while 1: - buf = inbuf.read() - if not buf: - break - outbuf.write(cipher.update(buf)) - outbuf.write(cipher.final()) - rv = outbuf.getvalue() - inbuf.close() - outbuf.close() - return rv - - -def str_encrypt(plaintext, key, iv=IV, algorithm=ALGORITHM, salt=None): - """ Encrypt a string with a key. For a higher-level encryption - interface, see :func:`ssl_encrypt`. - - :param plaintext: The plaintext data to encrypt - :type plaintext: string - :param key: The key to encrypt the data with - :type key: string - :param iv: The initialization vector - :type iv: string - :param algorithm: The cipher algorithm to use - :type algorithm: string - :param salt: The salt to use - :type salt: string - :returns: string - The decrypted data - """ - cipher = Cipher(alg=algorithm, key=key, iv=iv, op=ENCRYPT, salt=salt) - return _cipher_filter(cipher, plaintext) - - -def str_decrypt(crypted, key, iv=IV, algorithm=ALGORITHM): - """ Decrypt a string with a key. For a higher-level decryption - interface, see :func:`ssl_decrypt`. - - :param crypted: The raw binary encrypted data - :type crypted: string - :param key: The encryption key to decrypt with - :type key: string - :param iv: The initialization vector - :type iv: string - :param algorithm: The cipher algorithm to use - :type algorithm: string - :returns: string - The decrypted data - """ - cipher = Cipher(alg=algorithm, key=key, iv=iv, op=DECRYPT) - return _cipher_filter(cipher, crypted) - - -def ssl_decrypt(data, passwd, algorithm=ALGORITHM): - """ Decrypt openssl-encrypted data. This can decrypt data - encrypted by :func:`ssl_encrypt`, or ``openssl enc``. It performs - a base64 decode first if the data is base64 encoded, and - automatically determines the salt and initialization vector (both - of which are embedded in the encrypted data). - - :param data: The encrypted data (either base64-encoded or raw - binary) to decrypt - :type data: string - :param passwd: The password to use to decrypt the data - :type passwd: string - :param algorithm: The cipher algorithm to use - :type algorithm: string - :returns: string - The decrypted data - """ - # base64-decode the data - try: - data = b64decode(data) - except TypeError: - # we do not include the data in the error message, because one - # of the common causes of this is data that claims to be - # encrypted but is not. we don't want to include a plaintext - # secret in the error logs. - raise TypeError("Could not decode base64 data: %s" % - sys.exc_info()[1]) - salt = data[8:16] - # pylint: disable=E1101,E1121 - hashes = [md5(passwd + salt).digest()] - for i in range(1, 3): - hashes.append(md5(hashes[i - 1] + passwd + salt).digest()) - # pylint: enable=E1101,E1121 - key = hashes[0] + hashes[1] - iv = hashes[2] - - return str_decrypt(data[16:], key=key, iv=iv, algorithm=algorithm) - - -def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None): - """ Encrypt data in a format that is openssl compatible. 
- - :param plaintext: The plaintext data to encrypt - :type plaintext: string - :param passwd: The password to use to encrypt the data - :type passwd: string - :param algorithm: The cipher algorithm to use - :type algorithm: string - :param salt: The salt to use. If none is provided, one will be - randomly generated. - :type salt: bytes - :returns: string - The base64-encoded, salted, encrypted string. - The string includes a trailing newline to make it fully - compatible with openssl command-line tools. - """ - if salt is None: - salt = Rand.rand_bytes(8) - - # pylint: disable=E1101,E1121 - hashes = [md5(passwd + salt).digest()] - for i in range(1, 3): - hashes.append(md5(hashes[i - 1] + passwd + salt).digest()) - # pylint: enable=E1101,E1121 - key = hashes[0] + hashes[1] - iv = hashes[2] - - crypted = str_encrypt(plaintext, key=key, salt=salt, iv=iv, - algorithm=algorithm) - return b64encode("Salted__" + salt + crypted) + "\n" - - -def get_algorithm(setup): - """ Get the cipher algorithm from the config file. This is used - in case someone uses the OpenSSL algorithm name (e.g., - "AES-256-CBC") instead of the M2Crypto name (e.g., "aes_256_cbc"), - and to handle errors in a sensible way and deduplicate this code. - - :param setup: The Bcfg2 option set to extract passphrases from - :type setup: Bcfg2.Options.OptionParser - :returns: dict - a dict of ````: ```` - """ - return setup.cfp.get(CFG_SECTION, CFG_ALGORITHM, - default=ALGORITHM).lower().replace("-", "_") - - -def get_passphrases(setup): - """ Get all candidate encryption passphrases from the config file. - - :param setup: The Bcfg2 option set to extract passphrases from - :type setup: Bcfg2.Options.OptionParser - :returns: dict - a dict of ````: ```` - """ - section = CFG_SECTION - if setup.cfp.has_section(section): - return dict([(o, setup.cfp.get(section, o)) - for o in setup.cfp.options(section) - if o not in [CFG_ALGORITHM, CFG_DECRYPT]]) - else: - return dict() - - -def bruteforce_decrypt(crypted, passphrases=None, setup=None, - algorithm=ALGORITHM): - """ Convenience method to decrypt the given encrypted string by - trying the given passphrases or all passphrases (as returned by - :func:`get_passphrases`) sequentially until one is found that - works. - - Either ``passphrases`` or ``setup`` must be provided. - - :param crypted: The data to decrypt - :type crypted: string - :param passphrases: The passphrases to try. 
- :type passphrases: list - :param setup: A Bcfg2 option set to extract passphrases from - :type setup: Bcfg2.Options.OptionParser - :param algorithm: The cipher algorithm to use - :type algorithm: string - :returns: string - The decrypted data - :raises: :class:`M2Crypto.EVP.EVPError`, if the data cannot be decrypted - """ - if passphrases is None: - passphrases = get_passphrases(setup).values() - for passwd in passphrases: - try: - return ssl_decrypt(crypted, passwd, algorithm=algorithm) - except EVPError: - pass - raise EVPError("Failed to decrypt") diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Logger.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Logger.py --- bcfg2-1.3.5/src/lib/Bcfg2/Logger.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Logger.py 2017-01-10 19:18:17.000000000 +0000 @@ -9,6 +9,7 @@ import struct import sys import termios +import Bcfg2.Options logging.raiseExceptions = 0 @@ -20,7 +21,7 @@ def __init__(self, fmt=None, datefmt=None): logging.Formatter.__init__(self, fmt, datefmt) - if sys.stdout.isatty(): + if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): # now get termios info try: self.width = struct.unpack('hhhh', @@ -40,6 +41,8 @@ returns = [] line_len = self.width if isinstance(record.msg, str): + if len(record.args) != 0: + record.msg = record.msg % record.args for line in record.msg.split('\n'): if len(line) <= line_len: returns.append(line) @@ -150,8 +153,11 @@ logging.root.addHandler(console) -def add_syslog_handler(procname, syslog_facility, level=logging.DEBUG): +def add_syslog_handler(procname=None, syslog_facility='daemon', + level=logging.DEBUG): """Add a logging handler that logs as procname to syslog_facility.""" + if procname is None: + procname = Bcfg2.Options.get_parser().prog try: try: syslog = FragmentingSysLogHandler(procname, @@ -175,9 +181,9 @@ print("Failed to activate syslogging") -def add_file_handler(to_file, level=logging.DEBUG): - """Add a logging handler that logs to to_file.""" - filelog = logging.FileHandler(to_file) +def add_file_handler(level=logging.DEBUG): + """Add a logging handler that logs to a file.""" + filelog = logging.FileHandler(Bcfg2.Options.setup.logfile) try: filelog.set_name("file") # pylint: disable=E1101 except AttributeError: @@ -188,34 +194,127 @@ logging.root.addHandler(filelog) -def setup_logging(procname, to_console=True, to_syslog=True, - syslog_facility='daemon', level=0, to_file=None): +def default_log_level(): + """ Get the default log level, according to the configuration """ + if Bcfg2.Options.setup.debug: + return logging.DEBUG + elif Bcfg2.Options.setup.verbose: + return logging.INFO + else: + return logging.WARNING + + +def setup_logging(): """Setup logging for Bcfg2 software.""" if hasattr(logging, 'already_setup'): return + level = default_log_level() params = [] + to_console = True + if hasattr(Bcfg2.Options.setup, "daemon"): + if Bcfg2.Options.setup.daemon: + to_console = False + # if a command can be daemonized, but hasn't been, then we + # assume that they're running it in the foreground and thus + # want some more output. 
+ clvl = min(level, logging.INFO) + else: + clvl = level if to_console: - if to_console is True: - to_console = logging.WARNING - if level == 0: - clvl = to_console - else: - clvl = min(to_console, level) params.append("%s to console" % logging.getLevelName(clvl)) - add_console_handler(clvl) - if to_syslog: - if level == 0: - slvl = logging.INFO - else: - slvl = min(level, logging.INFO) + add_console_handler(level=clvl) + + if hasattr(Bcfg2.Options.setup, "syslog") and Bcfg2.Options.setup.syslog: + slvl = min(level, logging.INFO) params.append("%s to syslog" % logging.getLevelName(slvl)) - add_syslog_handler(procname, syslog_facility, level=slvl) - if to_file is not None: - params.append("%s to %s" % (logging.getLevelName(level), to_file)) - add_file_handler(to_file, level=level) + add_syslog_handler(level=slvl) + + if Bcfg2.Options.setup.logfile: + params.append("%s to %s" % (logging.getLevelName(level), + Bcfg2.Options.setup.logfile)) + add_file_handler(level=level) logging.root.setLevel(logging.DEBUG) logging.root.debug("Configured logging: %s" % "; ".join(params)) logging.already_setup = True + + +class Debuggable(object): + """ Mixin to add a debugging interface to an object """ + + options = [] + + #: List of names of methods to be exposed as XML-RPC functions, if + #: applicable to the child class + __rmi__ = ['toggle_debug', 'set_debug'] + + #: How exposed XML-RPC functions should be dispatched to child + #: processes. + __child_rmi__ = __rmi__[:] + + def __init__(self, name=None): + """ + :param name: The name of the logger object to get. If none is + supplied, the full name of the class (including + module) will be used. + :type name: string + """ + if name is None: + name = "%s.%s" % (self.__class__.__module__, + self.__class__.__name__) + self.debug_flag = Bcfg2.Options.setup.debug + self.logger = logging.getLogger(name) + + def set_debug(self, debug): + """ Explicitly enable or disable debugging. + + :returns: bool - The new value of the debug flag + """ + self.debug_flag = debug + return debug + + def toggle_debug(self): + """ Turn debugging output on or off. + + :returns: bool - The new value of the debug flag + """ + return self.set_debug(not self.debug_flag) + + def debug_log(self, message, flag=None): + """ Log a message at the debug level. + + :param message: The message to log + :type message: string + :param flag: Override the current debug flag with this value + :type flag: bool + :returns: None + """ + if (flag is None and self.debug_flag) or flag: + self.logger.error(message) + + +class _OptionContainer(object): + """ Container for options loaded at import-time to configure + logging """ + options = [ + Bcfg2.Options.BooleanOption( + '-d', '--debug', help='Enable debugging output', + cf=('logging', 'debug')), + Bcfg2.Options.BooleanOption( + '-v', '--verbose', help='Enable verbose output', + cf=('logging', 'verbose')), + Bcfg2.Options.PathOption( + '-o', '--logfile', help='Set path of file log', + cf=('logging', 'path'))] + + @staticmethod + def options_parsed_hook(): + """ initialize settings from /etc/bcfg2-web.conf or + /etc/bcfg2.conf, or set up basic defaults. 
this lets + manage.py work in all cases """ + setup_logging() + + +Bcfg2.Options.get_parser().add_component(_OptionContainer) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/manage.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/manage.py --- bcfg2-1.3.5/src/lib/Bcfg2/manage.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/manage.py 2017-01-10 19:18:17.000000000 +0000 @@ -1,14 +1,23 @@ #!/usr/bin/env python -from django.core.management import execute_manager -import imp +import os +import sys +import django +import Bcfg2.Options +import Bcfg2.DBSettings + try: - imp.find_module('settings') # Assumed to be in the same directory. + import Bcfg2.Server.models except ImportError: - import sys - sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__) - sys.exit(1) + pass -import settings +parser = Bcfg2.Options.get_parser() +parser.add_options([Bcfg2.Options.PositionalArgument('django_command', nargs='*')]) +parser.parse() if __name__ == "__main__": - execute_manager(settings) + if django.VERSION[0] == 1 and django.VERSION[1] >= 6: + from django.core.management import execute_from_command_line + execute_from_command_line(sys.argv[:1] + Bcfg2.Options.setup.django_command) + else: + from django.core.management import execute_manager + execute_manager(Bcfg2.DBSettings.settings) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/Actions.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Actions.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/Actions.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Actions.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,187 @@ +""" Custom argparse actions """ + +import sys +import argparse +from Bcfg2.Options.Parser import get_parser, OptionParserException +from Bcfg2.Options.Options import _debug + +__all__ = ["ConfigFileAction", "ComponentAction", "PluginsAction"] + + +class FinalizableAction(argparse.Action): + """ A FinalizableAction requires some additional action to be taken + when storing the value, and as a result must be finalized if the + default value is used.""" + + def __init__(self, *args, **kwargs): + argparse.Action.__init__(self, *args, **kwargs) + self._final = False + + def finalize(self, parser, namespace): + """ Finalize a default value by calling the action callable. """ + if not self._final: + self.__call__(parser, namespace, getattr(namespace, self.dest, + self.default)) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + self._final = True + + +class ComponentAction(FinalizableAction): + """ ComponentAction automatically imports classes and modules + based on the value of the option, and automatically collects + options from the loaded classes and modules. It cannot be used by + itself, but must be subclassed, with either :attr:`mapping` or + :attr:`bases` overridden. See + :class:`Bcfg2.Options.PluginsAction` for an example. + + ComponentActions expect to be given a list of class names. If + :attr:`bases` is overridden, then it will attempt to import those + classes from identically named modules within the given bases. + For instance: + + .. 
code-block:: python + + class FooComponentAction(Bcfg2.Options.ComponentAction): + bases = ["Bcfg2.Server.Foo"] + + + class FooLoader(object): + options = [ + Bcfg2.Options.Option( + "--foo", + type=Bcfg2.Options.Types.comma_list, + default=["One"], + action=FooComponentAction)] + + If "--foo One,Two,Three" were given on the command line, then + ``FooComponentAction`` would attempt to import + ``Bcfg2.Server.Foo.One.One``, ``Bcfg2.Server.Foo.Two.Two``, and + ``Bcfg2.Server.Foo.Three.Three``. (It would also call + :func:`Bcfg2.Options.Parser.add_component` with each of those + classes as arguments.) + + Note that, although ComponentActions expect lists of components + (by default; this can be overridden by setting :attr:`islist`), + you must still explicitly specify a ``type`` argument to the + :class:`Bcfg2.Options.Option` constructor to split the value into + a list. + + Note also that, unlike other actions, the default value of a + ComponentAction option does not need to be the actual literal + final value. (I.e., you don't have to import + ``Bcfg2.Server.Foo.One.One`` and set it as the default in the + example above; the string "One" suffices.) + """ + + #: A list of parent modules where modules or classes should be + #: imported from. + bases = [] + + #: A mapping of `` => `` that components will be + #: loaded from. This can be used to permit much more complex + #: behavior than just a list of :attr:`bases`. + mapping = dict() + + #: If ``module`` is True, then only the module will be loaded, not + #: a class from the module. For instance, in the example above, + #: ``FooComponentAction`` would attempt instead to import + #: ``Bcfg2.Server.Foo.One``, ``Bcfg2.Server.Foo.Two``, and + #: ``Bcfg2.Server.Foo.Three``. + module = False + + #: By default, ComponentActions expect a list of components to + #: load. If ``islist`` is False, then it will only expect a + #: single component. + islist = True + + #: If ``fail_silently`` is True, then failures to import modules + #: or classes will not be logged. This is useful when the default + #: is to import everything, some of which are expected to fail. + fail_silently = False + + def __init__(self, *args, **kwargs): + if self.mapping and not self.islist: + if 'choices' not in kwargs: + kwargs['choices'] = self.mapping.keys() + FinalizableAction.__init__(self, *args, **kwargs) + + def _import(self, module, name): + """ Import the given name from the given module, handling + errors """ + try: + return getattr(__import__(module, fromlist=[name]), name) + except (AttributeError, ImportError): + msg = "Failed to load %s from %s: %s" % (name, module, + sys.exc_info()[1]) + if not self.fail_silently: + print(msg) + else: + _debug(msg) + return None + + def _load_component(self, name): + """ Import a single class or module, adding it as a component to + the parser. + + :param name: The name of the class or module to import, without + the base prepended. + :type name: string + :returns: the imported class or module + """ + cls = None + if self.mapping and name in self.mapping: + cls = self.mapping[name] + elif "." 
in name: + cls = self._import(*name.rsplit(".", 1)) + else: + for base in self.bases: + if self.module: + mod = base + else: + mod = "%s.%s" % (base, name) + cls = self._import(mod, name) + if cls is not None: + break + if cls: + get_parser().add_component(cls) + elif not self.fail_silently: + raise OptionParserException("Could not load component %s" % name) + return cls + + def __call__(self, parser, namespace, values, option_string=None): + if values is None: + result = None + else: + if self.islist: + result = [] + for val in values: + cls = self._load_component(val) + if cls is not None: + result.append(cls) + else: + result = self._load_component(values) + FinalizableAction.__call__(self, parser, namespace, result, + option_string=option_string) + + +class ConfigFileAction(FinalizableAction): + """ ConfigFileAction automatically loads and parses a + supplementary config file (e.g., ``bcfg2-web.conf`` or + ``bcfg2-lint.conf``). """ + + def __call__(self, parser, namespace, values, option_string=None): + if values: + parser.add_config_file(self.dest, values, reparse=False) + else: + _debug("No config file passed for %s" % self) + FinalizableAction.__call__(self, parser, namespace, values, + option_string=option_string) + + +class PluginsAction(ComponentAction): + """ :class:`Bcfg2.Options.ComponentAction` subclass for loading + Bcfg2 server plugins. """ + bases = ['Bcfg2.Server.Plugins'] + fail_silently = True diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/Common.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Common.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/Common.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Common.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,129 @@ +""" Common options used in multiple different contexts. """ + +from Bcfg2.Utils import classproperty +from Bcfg2.Options import Types +from Bcfg2.Options.Actions import PluginsAction, ComponentAction +from Bcfg2.Options.Parser import repository as _repository_option +from Bcfg2.Options import Option, PathOption, BooleanOption + +__all__ = ["Common"] + + +class ReportingTransportAction(ComponentAction): + """ :class:`Bcfg2.Options.ComponentAction` that loads a single + reporting transport from :mod:`Bcfg2.Reporting.Transport`. """ + islist = False + bases = ['Bcfg2.Reporting.Transport'] + + +class ReportingStorageAction(ComponentAction): + """ :class:`Bcfg2.Options.ComponentAction` that loads a single + reporting storage driver from :mod:`Bcfg2.Reporting.Storage`. """ + islist = False + bases = ['Bcfg2.Reporting.Storage'] + + +class Common(object): + """ Common options used in multiple different contexts. 
""" + _plugins = None + _filemonitor = None + _reporting_storage = None + _reporting_transport = None + + @classproperty + def plugins(cls): + """ Load a list of Bcfg2 server plugins """ + if cls._plugins is None: + cls._plugins = Option( + cf=('server', 'plugins'), + type=Types.comma_list, help="Server plugin list", + action=PluginsAction, + default=['Bundler', 'Cfg', 'Metadata', 'Pkgmgr', 'Rules', + 'SSHbase']) + return cls._plugins + + @classproperty + def filemonitor(cls): + """ Load a single Bcfg2 file monitor (from + :attr:`Bcfg2.Server.FileMonitor.available`) """ + if cls._filemonitor is None: + import Bcfg2.Server.FileMonitor + + class FileMonitorAction(ComponentAction): + """ ComponentAction for loading a single FAM backend + class """ + islist = False + mapping = Bcfg2.Server.FileMonitor.available + + cls._filemonitor = Option( + cf=('server', 'filemonitor'), action=FileMonitorAction, + default='default', help='Server file monitoring driver') + return cls._filemonitor + + @classproperty + def reporting_storage(cls): + """ Load a Reporting storage backend """ + if cls._reporting_storage is None: + cls._reporting_storage = Option( + cf=('reporting', 'storage'), dest="reporting_storage", + help='Reporting storage engine', + action=ReportingStorageAction, default='DjangoORM') + return cls._reporting_storage + + @classproperty + def reporting_transport(cls): + """ Load a Reporting transport backend """ + if cls._reporting_transport is None: + cls._reporting_transport = Option( + cf=('reporting', 'transport'), dest="reporting_transport", + help='Reporting transport', + action=ReportingTransportAction, default='DirectStore') + return cls._reporting_transport + + #: Set the path to the Bcfg2 repository + repository = _repository_option + + #: Daemonize process, storing PID + daemon = PathOption( + '-D', '--daemon', help="Daemonize process, storing PID") + + #: Run interactively, prompting the user for each change + interactive = BooleanOption( + "-I", "--interactive", + help='Run interactively, prompting the user for each change') + + #: Log to syslog + syslog = BooleanOption( + cf=('logging', 'syslog'), help="Log to syslog", default=True) + + #: Server location + location = Option( + '-S', '--server', cf=('components', 'bcfg2'), + default='https://localhost:6789', metavar='', + help="Server location") + + #: Communication password + password = Option( + '-x', '--password', cf=('communication', 'password'), + metavar='', help="Communication Password") + + #: Path to SSL CA certificate + ssl_ca = PathOption( + cf=('communication', 'ca'), help='Path to SSL CA Cert') + + #: Communication protocol + protocol = Option( + cf=('communication', 'protocol'), default='xmlrpc/tlsv1', + choices=['xmlrpc/ssl', 'xmlrpc/tlsv1'], + help='Communication protocol to use.') + + #: Default Path paranoid setting + default_paranoid = Option( + cf=('mdata', 'paranoid'), dest="default_paranoid", default='true', + choices=['true', 'false'], help='Default Path paranoid setting') + + #: Client timeout + client_timeout = Option( + "-t", "--timeout", type=float, default=90.0, dest="client_timeout", + cf=('communication', 'timeout'), + help='Set the client XML-RPC timeout') diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/__init__.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/__init__.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/__init__.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,10 @@ +""" 
Bcfg2 options parsing. """ + +# pylint: disable=W0611,W0401 +from Bcfg2.Options import Types +from Bcfg2.Options.Options import * +from Bcfg2.Options.Common import * +from Bcfg2.Options.Parser import * +from Bcfg2.Options.Actions import * +from Bcfg2.Options.Subcommands import * +from Bcfg2.Options.OptionGroups import * diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/OptionGroups.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/OptionGroups.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/OptionGroups.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/OptionGroups.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,219 @@ +""" Option grouping classes """ + +import re +import copy +import fnmatch +from Bcfg2.Options import Option +from itertools import chain + +__all__ = ["OptionGroup", "ExclusiveOptionGroup", "Subparser", + "WildcardSectionGroup"] + + +class _OptionContainer(list): + """ Parent class of all option groups """ + + def list_options(self): + """ Get a list of all options contained in this group, + including options contained in option groups in this group, + and so on. """ + return list(chain(*[o.list_options() for o in self])) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, list.__repr__(self)) + + def add_to_parser(self, parser): + """ Add this option group to a :class:`Bcfg2.Options.Parser` + object. """ + for opt in self: + opt.add_to_parser(parser) + + +class OptionGroup(_OptionContainer): + """ Generic option group that is used only to organize options. + This uses :meth:`argparse.ArgumentParser.add_argument_group` + behind the scenes. """ + + def __init__(self, *items, **kwargs): + r""" + :param \*args: Child options + :type \*args: Bcfg2.Options.Option + :param title: The title of the option group + :type title: string + :param description: A longer description of the option group + :param description: string + """ + _OptionContainer.__init__(self, items) + self.title = kwargs.pop('title') + self.description = kwargs.pop('description', None) + + def add_to_parser(self, parser): + group = parser.add_argument_group(self.title, self.description) + _OptionContainer.add_to_parser(self, group) + + +class ExclusiveOptionGroup(_OptionContainer): + """ Option group that ensures that only one argument in the group + is present. This uses + :meth:`argparse.ArgumentParser.add_mutually_exclusive_group` + behind the scenes.""" + + def __init__(self, *items, **kwargs): + r""" + :param \*args: Child options + :type \*args: Bcfg2.Options.Option + :param required: Exactly one argument in the group *must* be + specified. + :type required: boolean + """ + _OptionContainer.__init__(self, items) + self.required = kwargs.pop('required', False) + + def add_to_parser(self, parser): + _OptionContainer.add_to_parser( + self, parser.add_mutually_exclusive_group(required=self.required)) + + +class Subparser(_OptionContainer): + """ Option group that adds options in it to a subparser. This + uses a lot of functionality tied to `argparse Sub-commands + `_. + + The subcommand string itself is stored in the + :attr:`Bcfg2.Options.setup` namespace as ``subcommand``. + + This is commonly used with :class:`Bcfg2.Options.Subcommand` + groups. + """ + + _subparsers = dict() + + def __init__(self, *items, **kwargs): + r""" + :param \*args: Child options + :type \*args: Bcfg2.Options.Option + :param name: The name of the subparser. Required. 
+ :type name: string + :param help: A help message for the subparser + :param help: string + """ + self.name = kwargs.pop('name') + self.help = kwargs.pop('help', None) + _OptionContainer.__init__(self, items) + + def __repr__(self): + return "%s %s(%s)" % (self.__class__.__name__, + self.name, + list.__repr__(self)) + + def add_to_parser(self, parser): + if parser not in self._subparsers: + self._subparsers[parser] = parser.add_subparsers(dest='subcommand') + subparser = self._subparsers[parser].add_parser(self.name, + help=self.help) + _OptionContainer.add_to_parser(self, subparser) + + +class WildcardSectionGroup(_OptionContainer, Option): + """WildcardSectionGroups contain options that may exist in + several different sections of the config that match a glob. It + works by creating options on the fly to match the sections + described in the glob. For example, consider: + + .. code-block:: python + + options = [ + Bcfg2.Options.WildcardSectionGroup( + Bcfg2.Options.Option(cf=("myplugin:*", "number"), type=int), + Bcfg2.Options.Option(cf=("myplugin:*", "description"))] + + If the config file contained ``[myplugin:foo]`` and + ``[myplugin:bar]`` sections, then this would automagically create + options for each of those. The end result would be: + + .. code-block:: python + + >>> Bcfg2.Options.setup + Namespace(myplugin_bar_description='Bar description', myplugin_myplugin_bar_number=2, myplugin_myplugin_foo_description='Foo description', myplugin_myplugin_foo_number=1, myplugin_sections=['myplugin:foo', 'myplugin:bar']) + + All options must have the same section glob. + + The options are stored in an automatically-generated destination + given by:: + +
        <prefix><section>_<dest>
+
+    ``<dest>`` is the original `dest
+    `_ of the
+    option.  ``<section>`` is the section that it's found in.
+    ``<prefix>`` is automatically generated from the section glob.
+    (This can be overridden with the constructor.)  Both ``<section>
`` + and ```` have had all consecutive characters disallowed in + Python variable names replaced with underscores. + + This group stores an additional option, the sections themselves, + in an option given by ``sections``. + """ + + #: Regex to automatically get a destination for this option + _dest_re = re.compile(r'(\A(_|[^A-Za-z])+)|((_|[^A-Za-z0-9])+)') + + def __init__(self, *items, **kwargs): + r""" + :param \*args: Child options + :type \*args: Bcfg2.Options.Option + :param prefix: The prefix to use for options generated by this + option group. By default this is generated + automatically from the config glob; see above + for details. + :type prefix: string + :param dest: The destination for the list of known sections + that match the glob. + :param dest: string + """ + _OptionContainer.__init__(self, []) + self._section_glob = items[0].cf[0] + # get a default destination + self._prefix = kwargs.get("prefix", + self._dest_re.sub('_', self._section_glob)) + Option.__init__(self, dest=kwargs.get('dest', + self._prefix + "sections")) + self.option_templates = items + + def list_options(self): + return [self] + _OptionContainer.list_options(self) + + def from_config(self, cfp): + sections = [] + for section in cfp.sections(): + if fnmatch.fnmatch(section, self._section_glob): + sections.append(section) + newopts = [] + for opt_tmpl in self.option_templates: + option = copy.deepcopy(opt_tmpl) + option.cf = (section, option.cf[1]) + option.dest = "%s%s_%s" % (self._prefix, + self._dest_re.sub('_', section), + option.dest) + newopts.append(option) + self.extend(newopts) + for parser in self.parsers: + parser.add_options(newopts) + return sections + + def add_to_parser(self, parser): + Option.add_to_parser(self, parser) + _OptionContainer.add_to_parser(self, parser) + + def __eq__(self, other): + return (_OptionContainer.__eq__(self, other) and + self.option_templates == getattr(other, "option_templates", + None)) + + def __repr__(self): + if len(self) == 0: + return "%s(%s)" % (self.__class__.__name__, + ", ".join(".".join(o.cf) + for o in self.option_templates)) + else: + return _OptionContainer.__repr__(self) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/Options.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Options.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/Options.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Options.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,473 @@ +"""Base :class:`Bcfg2.Options.Option` object to represent an option. + +Unlike options in :mod:`argparse`, an Option object does not need to +be associated with an option parser; it exists on its own. +""" + +import argparse +import copy +import fnmatch +import os +import sys + +from Bcfg2.Options import Types +from Bcfg2.Compat import ConfigParser + + +__all__ = ["Option", "BooleanOption", "RepositoryMacroOption", "PathOption", + "PositionalArgument", "_debug"] + +unit_test = False # pylint: disable=C0103 + + +def _debug(msg): + """ Option parsing happens before verbose/debug have been set -- + they're options, after all -- so option parsing verbosity is + enabled by changing this to True. The verbosity here is primarily + of use to developers. """ + if unit_test: + print("DEBUG: %s" % msg) + elif os.environ.get('BCFG2_OPTIONS_DEBUG', '0').lower() in ["true", "yes", + "on", "1"]: + sys.stderr.write("%s\n" % msg) + + +#: A dict that records a mapping of argparse action name (e.g., +#: "store_true") to the argparse Action class for it. 
See +#: :func:`_get_action_class` +_action_map = dict() # pylint: disable=C0103 + + +def _get_action_class(action_name): + """ Given an argparse action name (e.g., "store_true"), get the + related :class:`argparse.Action` class. The mapping that stores + this information in :mod:`argparse` itself is unfortunately + private, so it's an implementation detail that we shouldn't depend + on. So we just instantiate a dummy parser, add a dummy argument, + and determine the class that way. """ + if (isinstance(action_name, type) and + issubclass(action_name, argparse.Action)): + return action_name + if action_name not in _action_map: + action = argparse.ArgumentParser().add_argument(action_name, + action=action_name) + _action_map[action_name] = action.__class__ + return _action_map[action_name] + + +class Option(object): + """ Representation of an option that can be specified on the + command line, as an environment variable, or in a config + file. Precedence is in that order; that is, an option specified on + the command line takes precendence over an option given by the + environment, which takes precedence over an option specified in + the config file. """ + + #: Keyword arguments that should not be passed on to the + #: :class:`argparse.ArgumentParser` constructor + _local_args = ['cf', 'env', 'man'] + + def __init__(self, *args, **kwargs): + """ See :meth:`argparse.ArgumentParser.add_argument` for a + full list of accepted parameters. + + In addition to supporting all arguments and keyword arguments + from :meth:`argparse.ArgumentParser.add_argument`, several + additional keyword arguments are allowed. + + :param cf: A tuple giving the section and option name that + this argument can be referenced as in the config + file. The option name may contain the wildcard + '*', in which case the value will be a dict of all + options matching the glob. (To use a wildcard in + the section, use a + :class:`Bcfg2.Options.WildcardSectionGroup`.) + :type cf: tuple + :param env: An environment variable that the value of this + option can be taken from. + :type env: string + :param man: A detailed description of the option that will be + used to populate automatically-generated manpages. + :type man: string + """ + #: The options by which this option can be called. + #: (Coincidentally, this is also the list of arguments that + #: will be passed to + #: :meth:`argparse.ArgumentParser.add_argument` when this + #: option is added to a parser.) As a result, ``args`` can be + #: tested to see if this argument can be given on the command + #: line at all, or if it is purely a config file option. + self.args = args + self._kwargs = kwargs + + #: The tuple giving the section and option name for this + #: option in the config file + self.cf = None # pylint: disable=C0103 + + #: The environment variable that this option can take its + #: value from + self.env = None + + #: A detailed description of this option that will be used in + #: man pages. + self.man = None + + #: A list of :class:`Bcfg2.Options.Parser` objects to which + #: this option has been added. (There will be more than one + #: parser if this option is added to a subparser, for + #: instance.) + self.parsers = [] + + #: A dict of :class:`Bcfg2.Options.Parser` -> + #: :class:`argparse.Action` that gives the actions that + #: resulted from adding this option to each parser that it was + #: added to. If this option cannot be specified on the + #: command line (i.e., it only takes its value from the config + #: file), then this will be empty. 
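# Sketch of how the three value sources described above fit together (the
# option, section, and environment-variable names are hypothetical): a value
# given on the command line wins over the environment, which wins over the
# config file, which in turn wins over the default.
import Bcfg2.Options

example = Bcfg2.Options.Option(
    '--example-interval',              # command-line flag (highest priority)
    cf=('example', 'interval'),        # [example] interval in bcfg2.conf
    env='BCFG2_EXAMPLE_INTERVAL',      # environment variable
    type=int, default=300,
    help='Hypothetical polling interval')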
+ self.actions = dict() + + self.type = self._kwargs.get("type") + self.help = self._kwargs.get("help") + self._default = self._kwargs.get("default") + for kwarg in self._local_args: + setattr(self, kwarg, self._kwargs.pop(kwarg, None)) + if self.args: + # cli option + self._dest = None + else: + action_cls = _get_action_class(self._kwargs.get('action', 'store')) + # determine the name of this option. use, in order, the + # 'name' kwarg; the option name; the environment variable + # name. + self._dest = None + if 'dest' in self._kwargs: + self._dest = self._kwargs.pop('dest') + elif self.env is not None: + self._dest = self.env + elif self.cf is not None: + self._dest = self.cf[1] + self._dest = self._dest.lower().replace("-", "_") + kwargs = copy.copy(self._kwargs) + kwargs.pop("action", None) + self.actions[None] = action_cls(self._dest, self._dest, **kwargs) + + def __repr__(self): + sources = [] + if self.args: + sources.extend(self.args) + if self.cf: + sources.append("%s.%s" % self.cf) + if self.env: + sources.append("$" + self.env) + spec = ["sources=%s" % sources, "default=%s" % self.default, + "%d parsers" % len(self.parsers)] + return '%s(%s: %s)' % (self.__class__.__name__, + self.dest, ", ".join(spec)) + + def list_options(self): + """ List options contained in this option. This exists to + provide a consistent interface with + :class:`Bcfg2.Options.OptionGroup` """ + return [self] + + def finalize(self, namespace): + """ Finalize the default value for this option. This is used + with actions (such as :class:`Bcfg2.Options.ComponentAction`) + that allow you to specify a default in a different format than + its final storage format; this can be called after it has been + determined that the default will be used (i.e., the option is + not given on the command line or in the config file) to store + the appropriate default value in the appropriate format.""" + for parser, action in self.actions.items(): + if hasattr(action, "finalize"): + if parser: + _debug("Finalizing %s for %s" % (self, parser)) + else: + _debug("Finalizing %s" % self) + action.finalize(parser, namespace) + + @property + def _type_func(self): + """get a function for converting a value to the option type. + + this always returns a callable, even when ``type`` is None. + """ + if self.type: + return self.type + else: + return lambda x: x + + def from_config(self, cfp): + """ Get the value of this option from the given + :class:`ConfigParser.ConfigParser`. If it is not found in the + config file, the default is returned. (If there is no + default, None is returned.) + + :param cfp: The config parser to get the option value from + :type cfp: ConfigParser.ConfigParser + :returns: The default value + """ + if not self.cf: + return None + if '*' in self.cf[1]: + if cfp.has_section(self.cf[0]): + # build a list of known options in this section, and + # exclude them + exclude = set() + for parser in self.parsers: + exclude.update(o.cf[1] + for o in parser.option_list + if o.cf and o.cf[0] == self.cf[0]) + rv = dict([(o, cfp.get(self.cf[0], o)) + for o in fnmatch.filter(cfp.options(self.cf[0]), + self.cf[1]) + if o not in exclude]) + else: + rv = {} + else: + try: + rv = self._type_func(self.get_config_value(cfp)) + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + rv = None + _debug("Getting value of %s from config file(s): %s" % (self, rv)) + return rv + + def get_config_value(self, cfp): + """fetch a value from the config file. + + This is passed the config parser. 
Its result is passed to the + type function for this option. It can be overridden to, e.g., + handle boolean options. + """ + return cfp.get(*self.cf) + + def get_environ_value(self, value): + """fetch a value from the environment. + + This is passed the raw value from the environment variable, + and its result is passed to the type function for this + option. It can be overridden to, e.g., handle boolean options. + """ + return value + + def default_from_config(self, cfp): + """ Set the default value of this option from the config file + or from the environment. + + :param cfp: The config parser to get the option value from + :type cfp: ConfigParser.ConfigParser + """ + if self.env and self.env in os.environ: + self.default = self._type_func( + self.get_environ_value(os.environ[self.env])) + _debug("Setting the default of %s from environment: %s" % + (self, self.default)) + else: + val = self.from_config(cfp) + if val is not None: + _debug("Setting the default of %s from config: %s" % + (self, val)) + self.default = val + + def _get_default(self): + """ Getter for the ``default`` property """ + return self._default + + def _set_default(self, value): + """ Setter for the ``default`` property """ + self._default = value + for action in self.actions.values(): + action.default = value + + #: The current default value of this option + default = property(_get_default, _set_default) + + def _get_dest(self): + """ Getter for the ``dest`` property """ + return self._dest + + def _set_dest(self, value): + """ Setter for the ``dest`` property """ + self._dest = value + for action in self.actions.values(): + action.dest = value + + def early_parsing_hook(self, early_opts): # pylint: disable=C0111 + """Hook called at the end of early option parsing. + + This can be used to save option values for macro fixup. + """ + pass + + #: The namespace destination of this option (see `dest + #: `_) + dest = property(_get_dest, _set_dest) + + def add_to_parser(self, parser): + """ Add this option to the given parser. + + :param parser: The parser to add the option to. + :type parser: Bcfg2.Options.Parser + :returns: argparse.Action + """ + self.parsers.append(parser) + if self.args: + # cli option + _debug("Adding %s to %s as a CLI option" % (self, parser)) + action = parser.add_argument(*self.args, **self._kwargs) + if not self._dest: + self._dest = action.dest + if self._default: + action.default = self._default + self.actions[parser] = action + else: + # else, config file-only option + _debug("Adding %s to %s as a config file-only option" % + (self, parser)) + + +class RepositoryMacroOption(Option): + """Option that does translation of ```` macros. + + Macro translation is done on the fly instead of just fixing up all + values at the end of parsing because macro expansion needs to be + done before path canonicalization for + :class:`Bcfg2.Options.Options.PathOption`. 
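For example, a path option whose default lives inside the Bcfg2 repository can be written with the ``<repository>`` macro; once the repository location is known (from ``-Q``/``--repository`` or ``[server] repository``), the macro is expanded and the resulting path canonicalized. A sketch with hypothetical section and option names:

.. code-block:: python

    import Bcfg2.Options

    # Config-file-only option whose value may contain the <repository>
    # macro.  With the repository set to /var/lib/bcfg2, a configured
    # value of "<repository>/Probes" becomes "/var/lib/bcfg2/Probes".
    probes_path = Bcfg2.Options.PathOption(
        cf=("probes", "path"), default="<repository>/Probes",
        help="Directory containing probe scripts")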
+ """ + repository = None + + def __init__(self, *args, **kwargs): + self._original_type = kwargs.pop('type', lambda x: x) + kwargs['type'] = self._type + kwargs.setdefault('metavar', '') + Option.__init__(self, *args, **kwargs) + + def early_parsing_hook(self, early_opts): + if hasattr(early_opts, "repository"): + if self.__class__.repository is None: + _debug("Setting repository to %s for %s" % + (early_opts.repository, self.__class__.__name__)) + self.__class__.repository = early_opts.repository + else: + _debug("Repository is already set for %s" % self.__class__) + + def _get_default(self): + """ Getter for the ``default`` property """ + if not hasattr(self._default, "replace"): + return self._default + else: + return self._type(self._default) + + default = property(_get_default, Option._set_default) + + def transform_value(self, value): + """transform the value after macro expansion. + + this can be overridden to further transform the value set by + the user *after* macros are expanded, but before the user's + ``type`` function is applied. principally exists for + PathOption to canonicalize the path. + """ + return value + + def _type(self, value): + """Type function that fixes up macros.""" + if self.__class__.repository is None: + return value + else: + return self._original_type(self.transform_value( + value.replace("", self.__class__.repository))) + + +class PathOption(RepositoryMacroOption): + """Shortcut for options that expect a path argument. + + Uses :meth:`Bcfg2.Options.Types.path` to transform the argument + into a canonical path. The type of a path option can also be + overridden to return a file-like object. For example: + + .. code-block:: python + + options = [ + Bcfg2.Options.PathOption( + "--input", type=argparse.FileType('r'), + help="The input file")] + + PathOptions also do translation of ```` macros. + """ + def transform_value(self, value): + return Types.path(value) + + +class _BooleanOptionAction(argparse.Action): + """BooleanOptionAction sets a boolean value. + + - if None is passed, store the default + - if the option_string is not None, then the option was passed on the + command line, thus store the opposite of the default (this is the + argparse store_true and store_false behavior) + - if a boolean value is passed, use that + + Makes a copy of the initial default, because otherwise the default + can be changed by config file settings or environment + variables. For instance, if a boolean option that defaults to True + was set to False in the config file, specifying the option on the + CLI would then set it back to True. + + Defined here instead of :mod:`Bcfg2.Options.Actions` because otherwise + there is a circular import Options -> Actions -> Parser -> Options. + """ + + def __init__(self, *args, **kwargs): + argparse.Action.__init__(self, *args, **kwargs) + self.original = self.default + + def __call__(self, parser, namespace, values, option_string=None): + if values is None: + setattr(namespace, self.dest, self.default) + elif option_string is not None: + setattr(namespace, self.dest, not self.original) + else: + setattr(namespace, self.dest, bool(values)) + + +class BooleanOption(Option): + """ Shortcut for boolean options. The default is False, but this + can easily be overridden: + + .. 
code-block:: python + + options = [ + Bcfg2.Options.PathOption( + "--dwim", default=True, help="Do What I Mean")] + """ + def __init__(self, *args, **kwargs): + kwargs.setdefault('action', _BooleanOptionAction) + kwargs.setdefault('nargs', 0) + kwargs.setdefault('default', False) + Option.__init__(self, *args, **kwargs) + + def get_environ_value(self, value): + if value.lower() in ["false", "no", "off", "0"]: + return False + elif value.lower() in ["true", "yes", "on", "1"]: + return True + else: + raise ValueError("Invalid boolean value %s" % value) + + def get_config_value(self, cfp): + """fetch a value from the config file. + + This is passed the config parser. Its result is passed to the + type function for this option. It can be overridden to, e.g., + handle boolean options. + """ + return cfp.getboolean(*self.cf) + + +class PositionalArgument(Option): + """ Shortcut for positional arguments. """ + def __init__(self, *args, **kwargs): + if 'metavar' not in kwargs: + kwargs['metavar'] = '<%s>' % args[0] + Option.__init__(self, *args, **kwargs) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/Parser.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Parser.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/Parser.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Parser.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,417 @@ +"""The option parser.""" + +import argparse +import os +import sys + +from Bcfg2.version import __version__ +from Bcfg2.Compat import ConfigParser +from Bcfg2.Options import Option, PathOption, _debug + +__all__ = ["setup", "OptionParserException", "Parser", "get_parser", + "new_parser"] + + +#: The repository option. This is specified here (and imported into +#: :module:`Bcfg2.Options.Common`) rather than vice-versa due to +#: circular imports. +repository = PathOption( # pylint: disable=C0103 + '-Q', '--repository', cf=('server', 'repository'), + default='/var/lib/bcfg2', help="Server repository path") + + +#: A module-level :class:`argparse.Namespace` object that stores all +#: configuration for Bcfg2. +setup = argparse.Namespace(version=__version__, # pylint: disable=C0103 + name="Bcfg2", + uri='http://trac.mcs.anl.gov/projects/bcfg2') + + +class OptionParserException(Exception): + """ Base exception raised for generic option parser errors """ + + +class Parser(argparse.ArgumentParser): + """ The Bcfg2 option parser. Most interfaces should not need to + instantiate a parser, but should instead use + :func:`Bcfg2.Options.get_parser` to get the parser that already + exists.""" + + #: Option for specifying the path to the Bcfg2 config file + configfile = PathOption('-C', '--config', + env="BCFG2_CONFIG_FILE", + help="Path to configuration file", + default="/etc/bcfg2.conf") + + #: Verbose version string that is printed if executed with --version + _version_string = "%s %s on Python %s" % ( + os.path.basename(sys.argv[0]), + __version__, + ".".join(str(v) for v in sys.version_info[0:3])) + + #: Builtin options that apply to all commands + options = [configfile, + Option('--version', help="Print the version and exit", + action="version", version=_version_string), + Option('-E', '--encoding', metavar='', + default='UTF-8', help="Encoding of config files", + cf=('components', 'encoding'))] + + #: Flag used in unit tests to disable actual config file reads + unit_test = False + + def __init__(self, **kwargs): + """ See :class:`argparse.ArgumentParser` for a full list of + accepted parameters. 
+ + In addition to supporting all arguments and keyword arguments + from :class:`argparse.ArgumentParser`, several additional + keyword arguments are allowed. + + :param components: A list of components to add to the parser. + :type components: list + :param namespace: The namespace to store options in. Default + is :attr:`Bcfg2.Options.setup`. + :type namespace: argparse.Namespace + :param add_base_options: Whether or not to add the options in + :attr:`Bcfg2.Options.Parser.options` + to the parser. Setting this to False + is default for subparsers. Default is + True. + :type add_base_options: bool + """ + self._cfp = ConfigParser.ConfigParser() + components = kwargs.pop('components', []) + + #: The namespace options will be stored in. + self.namespace = kwargs.pop('namespace', setup) + if self.namespace is None: + self.namespace = setup + add_base_options = kwargs.pop('add_base_options', True) + + #: Flag to indicate that this is the pre-parsing 'early' run + #: for important options like database settings that must be + #: loaded before other components can be. + self._early = kwargs.pop('early', False) + + if 'add_help' not in kwargs: + kwargs['add_help'] = add_base_options + argparse.ArgumentParser.__init__(self, **kwargs) + + #: Whether or not parsing has completed on all current options. + self.parsed = False + + #: The argument list that was parsed. + self.argv = None + + #: Components that have been added to the parser + self.components = [] + + #: Options that have been added to the parser + self.option_list = [] + self._defaults_set = [] + self._config_files = [] + if add_base_options: + self.add_component(self) + if components: + for component in components: + self.add_component(component) + + def _check_duplicate_cf(self, option): + """Check for a duplicate config file option.""" + + def add_options(self, options): + """ Add an explicit list of options to the parser. When + possible, prefer :func:`Bcfg2.Options.Parser.add_component` to + add a whole component instead.""" + _debug("Adding options: %s" % options) + self.parsed = False + for option in options: + if option not in self.option_list: + # check for duplicates + if (hasattr(option, "env") and option.env and + option.env in [o.env for o in self.option_list]): + raise OptionParserException( + "Duplicate environment variable option: %s" % + option.env) + if (hasattr(option, "cf") and option.cf and + option.cf in [o.cf for o in self.option_list]): + raise OptionParserException( + "Duplicate config file option: %s" % (option.cf,)) + + self.option_list.extend(option.list_options()) + option.add_to_parser(self) + for opt in option.list_options(): + opt.default_from_config(self._cfp) + self._defaults_set.append(opt) + + def add_component(self, component): + """ Add a component (and all of its options) to the + parser. 
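As a rough sketch of the two registration styles, assuming a hypothetical component class and option names:

.. code-block:: python

    import Bcfg2.Options

    class ProbeSettings(object):
        """Hypothetical component: anything with an ``options`` attribute."""
        options = [
            Bcfg2.Options.BooleanOption(
                "--allow-empty", cf=("probes", "allow_empty"),
                help="Accept empty probe results")]

    parser = Bcfg2.Options.get_parser(components=[ProbeSettings])

    # Loose options can also be added directly, although registering a
    # component is generally preferred:
    parser.add_options([
        Bcfg2.Options.Option(cf=("probes", "timeout"), type=int, default=10,
                             help="Probe timeout in seconds")])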
""" + if component not in self.components: + _debug("Adding component %s to %s" % (component, self)) + self.components.append(component) + if hasattr(component, "options"): + self.add_options(getattr(component, "options")) + + def _set_defaults_from_config(self): + """ Set defaults from the config file for all options that can + come from the config file, but haven't yet had their default + set """ + _debug("Setting defaults on all options") + for opt in self.option_list: + if opt not in self._defaults_set: + opt.default_from_config(self._cfp) + self._defaults_set.append(opt) + + def _parse_config_options(self): + """ populate the namespace with default values for any options + that aren't already in the namespace (i.e., options without + CLI arguments) """ + _debug("Parsing config file-only options") + for opt in self.option_list[:]: + if not opt.args and opt.dest not in self.namespace: + value = opt.default + if value: + for _, action in opt.actions.items(): + _debug("Setting config file-only option %s to %s" % + (opt, value)) + action(self, self.namespace, value) + else: + _debug("Setting config file-only option %s to %s" % + (opt, value)) + setattr(self.namespace, opt.dest, value) + + def _finalize(self): + """ Finalize the value of any options that require that + additional post-processing step. (Mostly + :class:`Bcfg2.Options.Actions.ComponentAction` subclasses.) + """ + _debug("Finalizing options") + for opt in self.option_list[:]: + opt.finalize(self.namespace) + + def _reset_namespace(self): + """ Delete all options from the namespace except for a few + predefined values and config file options. """ + self.parsed = False + _debug("Resetting namespace") + for attr in dir(self.namespace): + if (not attr.startswith("_") and + attr not in ['uri', 'version', 'name'] and + attr not in self._config_files): + _debug("Deleting %s" % attr) + delattr(self.namespace, attr) + + def _parse_early_options(self): + """Parse early options. + + Early options are options that need to be parsed before other + options for some reason. These fall into two basic cases: + + 1. Database options, which need to be parsed so that Django + modules can be imported, since Django configuration is all + done at import-time; + 2. The repository (``-Q``) option, so that ```` + macros in other options can be resolved. 
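A component opts into this early pass by setting ``parse_first``; the sketch below is hypothetical and only meant to show the shape of such a component:

.. code-block:: python

    import Bcfg2.Options

    class DatabaseSettings(object):
        """Hypothetical component whose options must be parsed early,
        e.g. because Django reads its settings at import time."""
        parse_first = True
        options = [
            Bcfg2.Options.Option(cf=("database", "engine"),
                                 default="sqlite3",
                                 help="Database engine")]

        @staticmethod
        def component_parsed_hook(early_opts):
            # Called as soon as the early parsing pass has finished.
            print("configuring database engine %s" % early_opts.engine)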
+ """ + _debug("Option parsing phase 2: Parse early options") + early_opts = argparse.Namespace() + early_parser = Parser(add_help=False, namespace=early_opts, + early=True) + + # add the repo option so we can resolve + # macros + early_parser.add_options([repository]) + + early_components = [] + for component in self.components: + if getattr(component, "parse_first", False): + early_components.append(component) + early_parser.add_component(component) + early_parser.parse(self.argv) + + _debug("Fixing up macros in early options") + for attr_name in dir(early_opts): + if not attr_name.startswith("_"): + attr = getattr(early_opts, attr_name) + if hasattr(attr, "replace"): + setattr(early_opts, attr_name, + attr.replace("", + early_opts.repository)) + + _debug("Early parsing complete, calling hooks") + for component in early_components: + if hasattr(component, "component_parsed_hook"): + _debug("Calling component_parsed_hook on %s" % component) + getattr(component, "component_parsed_hook")(early_opts) + _debug("Calling early parsing hooks; early options: %s" % + early_opts) + for option in self.option_list: + option.early_parsing_hook(early_opts) + + def add_config_file(self, dest, cfile, reparse=True): + """ Add a config file, which triggers a full reparse of all + options. """ + if dest not in self._config_files: + _debug("Adding new config file %s for %s" % (cfile, dest)) + self._reset_namespace() + self._cfp.read([cfile]) + self._defaults_set = [] + self._set_defaults_from_config() + if reparse: + self._parse_config_options() + self._config_files.append(dest) + + def reparse(self, argv=None): + """ Reparse options after they have already been parsed. + + :param argv: The argument list to parse. By default, + :attr:`Bcfg2.Options.Parser.argv` is reused. + (I.e., the argument list that was initially + parsed.) + :type argv: list + """ + _debug("Reparsing all options") + self._reset_namespace() + self.parse(argv or self.argv) + + def parse(self, argv=None): + """ Parse options. + + :param argv: The argument list to parse. By default, + ``sys.argv[1:]`` is used. This is stored in + :attr:`Bcfg2.Options.Parser.argv` for reuse by + :func:`Bcfg2.Options.Parser.reparse`. + :type argv: list + """ + _debug("Parsing options") + if argv is None: + argv = sys.argv[1:] # pragma: nocover + if self.parsed and self.argv == argv: + _debug("Returning already parsed namespace") + return self.namespace + self.argv = argv + + # phase 1: get and read config file + _debug("Option parsing phase 1: Get and read main config file") + bootstrap_parser = argparse.ArgumentParser(add_help=False) + self.configfile.add_to_parser(bootstrap_parser) + self.configfile.default_from_config(self._cfp) + bootstrap = bootstrap_parser.parse_known_args(args=self.argv)[0] + + # check whether the specified bcfg2.conf exists + if not self.unit_test and not os.path.exists(bootstrap.config): + self.error("Could not read %s" % bootstrap.config) + self.add_config_file(self.configfile.dest, bootstrap.config, + reparse=False) + + # phase 2: re-parse command line for early options; currently, + # that's database options + if not self._early: + self._parse_early_options() + else: + _debug("Skipping parsing phase 2 in early mode") + + # phase 3: re-parse command line, loading additional + # components, until all components have been loaded. 
On each + # iteration, set defaults from config file/environment + # variables + _debug("Option parsing phase 3: Main parser loop") + # _set_defaults_from_config must be called before _parse_config_options + # This is due to a tricky interaction between the two methods: + # + # (1) _set_defaults_from_config does what its name implies, it updates + # the "default" property of each Option based on the value that exists + # in the config. + # + # (2) _parse_config_options will look at each option and set it to the + # default value that is _currently_ defined. If the option does not + # exist in the namespace, it will be added. The method carefully + # avoids overwriting the value of an option that is already defined in + # the namespace. + # + # Thus, if _set_defaults_from_config has not been called yet when + # _parse_config_options is called, all config file options will get set + # to their hardcoded defaults. This process defines the options in the + # namespace and _parse_config_options will never look at them again. + # + # we have to do the parsing in two loops: first, we squeeze as + # much data out of the config file as we can to ensure that + # all config file settings are read before we use any default + # values. then we can start looking at the command line. + while not self.parsed: + self.parsed = True + self._set_defaults_from_config() + self._parse_config_options() + self.parsed = False + remaining = [] + while not self.parsed: + self.parsed = True + _debug("Parsing known arguments") + try: + _, remaining = self.parse_known_args(args=self.argv, + namespace=self.namespace) + except OptionParserException: + self.error(sys.exc_info()[1]) + self._set_defaults_from_config() + self._parse_config_options() + self._finalize() + if len(remaining) and not self._early: + self.error("Unknown options: %s" % " ".join(remaining)) + + # phase 4: call post-parsing hooks + if not self._early: + _debug("Option parsing phase 4: Call hooks") + for component in self.components: + if hasattr(component, "options_parsed_hook"): + _debug("Calling post-parsing hook on %s" % component) + getattr(component, "options_parsed_hook")() + + return self.namespace + + +#: A module-level :class:`Bcfg2.Options.Parser` object that is used +#: for all parsing +_parser = Parser() # pylint: disable=C0103 + + +def new_parser(): + """Create a new :class:`Bcfg2.Options.Parser` object. + + The new object can be retrieved with + :func:`Bcfg2.Options.get_parser`. This is useful for unit + testing. + """ + global _parser + _parser = Parser() + + +def get_parser(description=None, components=None, namespace=None): + """Get an existing :class:`Bcfg2.Options.Parser` object. + + A Parser is created at the module level when :mod:`Bcfg2.Options` + is imported. If any arguments are given, then the existing parser + is modified before being returned. 
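A typical entry point therefore looks roughly like the following sketch (assuming a readable bcfg2.conf; the description text is arbitrary):

.. code-block:: python

    import Bcfg2.Options

    def main():
        parser = Bcfg2.Options.get_parser(
            description="hypothetical bcfg2 maintenance script")
        parser.parse()
        # Parsed values, including the built-in -C/--config and
        # -E/--encoding options, end up in the shared module-level
        # Bcfg2.Options.setup namespace.
        print(Bcfg2.Options.setup.config)
        print(Bcfg2.Options.setup.encoding)

    if __name__ == "__main__":
        main()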
+ + :param description: Set the parser description + :type description: string + :param components: Load the given components in the parser + :type components: list + :param namespace: Use the given namespace instead of + :attr:`Bcfg2.Options.setup` + :type namespace: argparse.Namespace + :returns: Bcfg2.Options.Parser object + """ + if Parser.unit_test: + return Parser(description=description, components=components, + namespace=namespace) + elif (description or components or namespace): + if description: + _parser.description = description + if components is not None: + for component in components: + _parser.add_component(component) + if namespace: + _parser.namespace = namespace + return _parser diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/Subcommands.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Subcommands.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/Subcommands.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Subcommands.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,267 @@ +""" Classes to make it easier to create commands with large numbers of +subcommands (e.g., bcfg2-admin, bcfg2-info). """ + +import re +import cmd +import sys +import copy +import shlex +import logging + +from Bcfg2.Compat import StringIO +from Bcfg2.Options import PositionalArgument, _debug +from Bcfg2.Options.OptionGroups import Subparser +from Bcfg2.Options.Parser import Parser, setup as master_setup + +__all__ = ["Subcommand", "CommandRegistry"] + + +class Subcommand(object): + """ Base class for subcommands. This must be subclassed to create + commands. + + Specifically, you must override + :func:`Bcfg2.Options.Subcommand.run`. You may want to override: + + * The docstring, which will be used as the short help. + * :attr:`Bcfg2.Options.Subcommand.options` + * :attr:`Bcfg2.Options.Subcommand.help` + * :attr:`Bcfg2.Options.Subcommand.interactive` + * + * :func:`Bcfg2.Options.Subcommand.shutdown` + + You should not need to override + :func:`Bcfg2.Options.Subcommand.__call__` or + :func:`Bcfg2.Options.Subcommand.usage`. + + A ``Subcommand`` subclass constructor must not take any arguments. + """ + + #: Options this command takes + options = [] + + #: Longer help message + help = None + + #: Whether or not to expose this command in an interactive + #: :class:`cmd.Cmd` shell, if one is used. (``bcfg2-info`` uses + #: one, ``bcfg2-admin`` does not.) + interactive = True + + #: Whether or not to expose this command as command line parameter + #: or only in an interactive :class:`cmd.Cmd` shell. + only_interactive = False + + #: Additional aliases for the command. The contents of the list gets + #: added to the default command name (the lowercased class name) + aliases = [] + + _ws_re = re.compile(r'\s+', flags=re.MULTILINE) + + def __init__(self): + self.core = None + description = "%s: %s" % (self.__class__.__name__.lower(), + self.__class__.__doc__) + + #: The :class:`Bcfg2.Options.Parser` that will be used to + #: parse options if this subcommand is called from an + #: interactive :class:`cmd.Cmd` shell. + self.parser = Parser( + prog=self.__class__.__name__.lower(), + description=description, + components=[self], + add_base_options=False, + epilog=self.help) + self._usage = None + + #: A :class:`logging.Logger` that can be used to produce + #: logging output for this command. 
+ self.logger = logging.getLogger(self.__class__.__name__.lower()) + + def __call__(self, args=None): + """ Perform option parsing and other tasks necessary to + support running ``Subcommand`` objects as part of a + :class:`cmd.Cmd` shell. You should not need to override + ``__call__``. + + :param args: Arguments given in the interactive shell + :type args: list of strings + :returns: The return value of :func:`Bcfg2.Options.Subcommand.run` + """ + if args is not None: + self.parser.namespace = copy.copy(master_setup) + self.parser.parsed = False + alist = shlex.split(args) + try: + setup = self.parser.parse(alist) + except SystemExit: + return sys.exc_info()[1].code + return self.run(setup) + else: + return self.run(master_setup) + + def usage(self): + """ Get the short usage message. """ + if self._usage is None: + sio = StringIO() + self.parser.print_usage(file=sio) + usage = self._ws_re.sub(' ', sio.getvalue()).strip()[7:] + doc = self._ws_re.sub(' ', getattr(self, "__doc__") or "").strip() + if not doc: + self._usage = usage + else: + self._usage = "%s - %s" % (usage, doc) + return self._usage + + def run(self, setup): + """ Run the command. + + :param setup: A namespace giving the options for this command. + This must be used instead of + :attr:`Bcfg2.Options.setup` because this command + may have been called from an interactive + :class:`cmd.Cmd` shell, and thus has its own + option parser and its own (private) namespace. + ``setup`` is guaranteed to contain all of the + options in the global + :attr:`Bcfg2.Options.setup` namespace, in + addition to any local options given to this + command from the interactive shell. + :type setup: argparse.Namespace + """ + raise NotImplementedError # pragma: nocover + + def shutdown(self): + """ Perform any necessary shutdown tasks for this command This + is called to when the program exits (*not* when this command + is finished executing). """ + pass # pragma: nocover + + +class Help(Subcommand): + """List subcommands and usage, or get help on a specific subcommand.""" + options = [PositionalArgument("command", nargs='?')] + + # the interactive shell has its own help + interactive = False + + def __init__(self, registry): + Subcommand.__init__(self) + self._registry = registry + + def run(self, setup): + commands = dict((name, cmd) + for (name, cmd) in self._registry.commands.items() + if not cmd.only_interactive) + if setup.command: + try: + commands[setup.command].parser.print_help() + return 0 + except KeyError: + print("No such command: %s" % setup.command) + return 1 + for command in sorted(commands.keys()): + print(commands[command].usage()) + + +class CommandRegistry(object): + """A ``CommandRegistry`` is used to register subcommands and provides + a single interface to run them. It's also used by + :class:`Bcfg2.Options.Subcommands.Help` to produce help messages + for all available commands. + """ + + def __init__(self): + #: A dict of registered commands. Keys are the class names, + #: lowercased (i.e., the command names), and values are instances + #: of the command objects. + self.commands = dict() + + #: A list of options that should be added to the option parser + #: in order to handle registered subcommands. + self.subcommand_options = [] + + #: the help command + self.help = Help(self) + self.register_command(self.help) + + def runcommand(self): + """ Run the single command named in + ``Bcfg2.Options.setup.subcommand``, which is where + :class:`Bcfg2.Options.Subparser` groups store the + subcommand. 
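Putting the pieces together, a command-line tool with subcommands might be wired up roughly as follows; the ``Frobnicate`` command and its argument are invented for the example, and the ``help`` subcommand is provided automatically by the registry:

.. code-block:: python

    import sys
    import Bcfg2.Options

    class Frobnicate(Bcfg2.Options.Subcommand):
        """Frobnicate a single host (hypothetical example command)."""
        options = [Bcfg2.Options.PositionalArgument("hostname")]

        def run(self, setup):
            print("frobnicating %s" % setup.hostname)
            return 0

    def main():
        registry = Bcfg2.Options.CommandRegistry()
        registry.register_command(Frobnicate)
        parser = Bcfg2.Options.get_parser(
            description="frob-tool (hypothetical)")
        parser.add_options(registry.subcommand_options)
        parser.parse()
        return registry.runcommand()

    if __name__ == "__main__":
        sys.exit(main())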
""" + _debug("Running subcommand %s" % master_setup.subcommand) + try: + return self.commands[master_setup.subcommand].run(master_setup) + finally: + self.shutdown() + + def shutdown(self): + """Perform shutdown tasks. + + This calls the ``shutdown`` method of the subcommand that was + run. + """ + _debug("Shutting down subcommand %s" % master_setup.subcommand) + self.commands[master_setup.subcommand].shutdown() + + def register_command(self, cls_or_obj): + """ Register a single command. + + :param cls_or_obj: The command class or object to register + :type cls_or_obj: type or Subcommand + :returns: An instance of ``cmdcls`` + """ + if isinstance(cls_or_obj, type): + cmdcls = cls_or_obj + cmd_obj = cmdcls() + else: + cmd_obj = cls_or_obj + cmdcls = cmd_obj.__class__ + names = [cmdcls.__name__.lower()] + if cmdcls.aliases: + names.extend(cmdcls.aliases) + + for name in names: + self.commands[name] = cmd_obj + + if not cmdcls.only_interactive: + # py2.5 can't mix *magic and non-magical keyword args, thus + # the **dict(...) + self.subcommand_options.append( + Subparser(*cmdcls.options, **dict(name=name, + help=cmdcls.__doc__))) + if issubclass(self.__class__, cmd.Cmd) and cmdcls.interactive: + setattr(self, "do_%s" % name, cmd_obj) + setattr(self, "help_%s" % name, cmd_obj.parser.print_help) + return cmd_obj + + def register_commands(self, candidates, parent=Subcommand): + """ Register all subcommands in ``candidates`` against the + :class:`Bcfg2.Options.CommandRegistry` subclass given in + ``registry``. A command is registered if and only if: + + * It is a subclass of the given ``parent`` (by default, + :class:`Bcfg2.Options.Subcommand`); + * It is not the parent class itself; and + * Its name does not start with an underscore. + + :param registry: The :class:`Bcfg2.Options.CommandRegistry` + subclass against which commands will be + registered. + :type registry: Bcfg2.Options.CommandRegistry + :param candidates: A list of objects that will be considered for + registration. Only objects that meet the + criteria listed above will be registered. + :type candidates: list + :param parent: Specify a parent class other than + :class:`Bcfg2.Options.Subcommand` that all + registered commands must subclass. + :type parent: type + """ + for attr in candidates: + if (isinstance(attr, type) and + issubclass(attr, parent) and + attr != parent and + not attr.__name__.startswith("_")): + self.register_command(attr) diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options/Types.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Types.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options/Types.py 1970-01-01 00:00:00.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options/Types.py 2017-01-10 19:18:17.000000000 +0000 @@ -0,0 +1,107 @@ +""" :mod:`Bcfg2.Options` provides a number of useful types for use +with the :class:`Bcfg2.Options.Option` constructor. """ + +import os +import re +import pwd +import grp +from Bcfg2.Compat import literal_eval + +_COMMA_SPLIT_RE = re.compile(r'\s*,\s*') + + +def path(value): + """ A generic path. ``~`` will be expanded with + :func:`os.path.expanduser` and the absolute resulting path will be + used. This does *not* ensure that the path exists. """ + return os.path.abspath(os.path.expanduser(value)) + + +def comma_list(value): + """ Split a comma-delimited list, with optional whitespace around + the commas.""" + if value == '': + return [] + return _COMMA_SPLIT_RE.split(value) + + +def colon_list(value): + """ Split a colon-delimited list. 
Whitespace is not allowed + around the colons. """ + if value == '': + return [] + return value.split(':') + + +def literal_dict(value): + """ literally evaluate the option in order to allow for arbitrarily nested + dictionaries """ + return literal_eval(value) + + +def anchored_regex_list(value): + """ Split an option string on whitespace and compile each element as + an anchored regex """ + try: + return [re.compile('^' + x + '$') for x in re.split(r'\s+', value)] + except re.error: + raise ValueError("Not a list of regexes", value) + + +def octal(value): + """ Given an octal string, get an integer representation. """ + return int(value, 8) + + +def username(value): + """ Given a username or numeric UID, get a numeric UID. The user + must exist.""" + try: + return int(value) + except ValueError: + return int(pwd.getpwnam(value)[2]) + + +def groupname(value): + """ Given a group name or numeric GID, get a numeric GID. The + user must exist.""" + try: + return int(value) + except ValueError: + return int(grp.getgrnam(value)[2]) + + +def timeout(value): + """ Convert the value into a float or None. """ + if value is None: + return value + rv = float(value) # pass ValueError up the stack + if rv <= 0: + return None + return rv + + +# pylint: disable=C0103 +_bytes_multipliers = dict(k=1, + m=2, + g=3, + t=4) +_suffixes = "".join(_bytes_multipliers.keys()).lower() +_suffixes += _suffixes.upper() +_bytes_re = re.compile(r'(?P\d+)(?P[%s])?' % _suffixes) +# pylint: enable=C0103 + + +def size(value): + """ Given a number of bytes in a human-readable format (e.g., + '512m', '2g'), get the absolute number of bytes as an integer. + """ + mat = _bytes_re.match(value) + if not mat: + raise ValueError("Not a valid size", value) + rvalue = int(mat.group("value")) + mult = mat.group("multiplier") + if mult: + return rvalue * (1024 ** _bytes_multipliers[mult.lower()]) + else: + return rvalue diff -Nru bcfg2-1.3.5/src/lib/Bcfg2/Options.py bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options.py --- bcfg2-1.3.5/src/lib/Bcfg2/Options.py 2014-09-05 12:54:48.000000000 +0000 +++ bcfg2-1.4.0~pre2+git141-g6d40dace6358/src/lib/Bcfg2/Options.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1397 +0,0 @@ -"""Option parsing library for utilities.""" - -import copy -import getopt -import inspect -import os -import re -import shlex -import sys -import grp -import pwd -import Bcfg2.Client.Tools -from Bcfg2.Compat import ConfigParser -from Bcfg2.version import __version__ - - -class OptionFailure(Exception): - """ raised when malformed Option objects are instantiated """ - pass - -DEFAULT_CONFIG_LOCATION = '/etc/bcfg2.conf' -DEFAULT_INSTALL_PREFIX = '/usr' - - -class DefaultConfigParser(ConfigParser.ConfigParser): - """ A config parser that can be used to query options with default - values in the event that the option is not found """ - - def __init__(self, *args, **kwargs): - """Make configuration options case sensitive""" - ConfigParser.ConfigParser.__init__(self, *args, **kwargs) - self.optionxform = str - - def get(self, section, option, **kwargs): - """ convenience method for getting config items """ - default = None - if 'default' in kwargs: - default = kwargs['default'] - del kwargs['default'] - try: - return ConfigParser.ConfigParser.get(self, section, option, - **kwargs) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - if default is not None: - return default - else: - raise - - def getboolean(self, section, option, **kwargs): - """ convenience method for getting boolean config items """ - 
default = None - if 'default' in kwargs: - default = kwargs['default'] - del kwargs['default'] - try: - return ConfigParser.ConfigParser.getboolean(self, section, - option, **kwargs) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, - ValueError): - if default is not None: - return default - else: - raise - - -class Option(object): - """ a single option, which might be read from the command line, - environment, or config file """ - - # pylint: disable=C0103,R0913 - def __init__(self, desc, default, cmd=None, odesc=False, - env=False, cf=False, cook=False, long_arg=False, - deprecated_cf=None): - self.desc = desc - self.default = default - self.cmd = cmd - self.long = long_arg - if not self.long: - if cmd and (cmd[0] != '-' or len(cmd) != 2): - raise OptionFailure("Poorly formed command %s" % cmd) - elif cmd and not cmd.startswith('--'): - raise OptionFailure("Poorly formed command %s" % cmd) - self.odesc = odesc - self.env = env - self.cf = cf - self.deprecated_cf = deprecated_cf - self.boolean = False - if not odesc and not cook and isinstance(self.default, bool): - self.boolean = True - self.cook = cook - self.value = None - # pylint: enable=C0103,R0913 - - def get_cooked_value(self, value): - """ get the value of this option after performing any option - munging specified in the 'cook' keyword argument to the - constructor """ - if self.boolean: - return True - if self.cook: - return self.cook(value) - else: - return value - - def __str__(self): - rv = ["%s: " % self.__class__.__name__, self.desc] - if self.cmd or self.cf: - rv.append(" (") - if self.cmd: - if self.odesc: - if self.long: - rv.append("%s=%s" % (self.cmd, self.odesc)) - else: - rv.append("%s %s" % (self.cmd, self.odesc)) - else: - rv.append("%s" % self.cmd) - - if self.cf: - if self.cmd: - rv.append("; ") - rv.append("[%s].%s" % self.cf) - if self.cmd or self.cf: - rv.append(")") - if hasattr(self, "value"): - rv.append(": %s" % self.value) - return "".join(rv) - - def buildHelpMessage(self): - """ build the help message for this option """ - vals = [] - if not self.cmd: - return '' - if self.odesc: - if self.long: - vals.append("%s=%s" % (self.cmd, self.odesc)) - else: - vals.append("%s %s" % (self.cmd, self.odesc)) - else: - vals.append(self.cmd) - vals.append(self.desc) - return " %-28s %s\n" % tuple(vals) - - def buildGetopt(self): - """ build a string suitable for describing this short option - to getopt """ - gstr = '' - if self.long: - return gstr - if self.cmd: - gstr = self.cmd[1] - if self.odesc: - gstr += ':' - return gstr - - def buildLongGetopt(self): - """ build a string suitable for describing this long option to - getopt """ - if self.odesc: - return self.cmd[2:] + '=' - else: - return self.cmd[2:] - - def parse(self, opts, rawopts, configparser=None): - """ parse a single option. try parsing the data out of opts - (the results of getopt), rawopts (the raw option string), the - environment, and finally the config parser. 
either opts or - rawopts should be provided, but not both """ - if self.cmd and opts: - # Processing getopted data - optinfo = [opt[1] for opt in opts if opt[0] == self.cmd] - if optinfo: - if optinfo[0]: - self.value = self.get_cooked_value(optinfo[0]) - else: - self.value = True - return - if self.cmd and self.cmd in rawopts: - if self.odesc: - data = rawopts[rawopts.index(self.cmd) + 1] - else: - data = True - self.value = self.get_cooked_value(data) - return - # No command line option found - if self.env and self.env in os.environ: - self.value = self.get_cooked_value(os.environ[self.env]) - return - if self.cf and configparser: - try: - self.value = self.get_cooked_value(configparser.get(*self.cf)) - return - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - pass - if self.deprecated_cf: - try: - self.value = self.get_cooked_value( - configparser.get(*self.deprecated_cf)) - print("Warning: [%s] %s is deprecated, use [%s] %s instead" - % (self.deprecated_cf[0], self.deprecated_cf[1], - self.cf[0], self.cf[1])) - return - except (ConfigParser.NoSectionError, - ConfigParser.NoOptionError): - pass - - # Default value not cooked - self.value = self.default - - -class OptionSet(dict): - """ a set of Option objects that interfaces with getopt and - DefaultConfigParser to populate a dict of