We could stop accepting core files for large crashes with something like this:
$ bzr diff
=== modified file 'daisy/submit_core.py'
--- daisy/submit_core.py	2016-05-24 16:29:14 +0000
+++ daisy/submit_core.py	2016-05-24 16:44:33 +0000
@@ -117,8 +117,10 @@
         t_size = os.path.getsize(t.name)
         msg = '%s has a %i byte core file' % (oops_id, t_size)
         logger.info(msg)
-        # Don't set a content_length (that we don't have) to force a chunked
-        # transfer.
+        if t_size > 100000:
+            msg = 'Not writing extra large core file for %s.' % (oops_id)
+            logger.info(msg)
+            return False
         _cached_swift.put_object(bucket, oops_id, t, content_length=t_size)
     except IOError, e:
         swift_delete_ignoring_error(_cached_swift, bucket, oops_id)
@@ -233,6 +235,7 @@
     if written:
         return message
     else:
+        # don't want this to increment if it is a large core file
         metrics.meter('storage_write_error')
         return None
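As the comment in the second hunk hints, returning False from the write path would also trip the storage_write_error metric, so a deliberate size rejection would be counted as a storage failure. One way to keep the two cases apart is to return a distinct sentinel for oversized cores and only meter genuine failures. This is a minimal sketch, not daisy's actual code: the MAX_CORE_SIZE constant, REJECTED sentinel, and write_core/handle_result helpers are all hypothetical, and the patch above hard-codes the 100000-byte (roughly 100 kB) threshold inline.

import logging

logger = logging.getLogger(__name__)

# Hypothetical constant; the patch above hard-codes 100000 bytes.
MAX_CORE_SIZE = 100000

# Hypothetical sentinel so a size rejection is distinguishable from a
# failed write (both would otherwise be falsy/False).
REJECTED = object()

def write_core(bucket, oops_id, fileobj, t_size):
    """Sketch of the write path: reject oversized cores up front."""
    if t_size > MAX_CORE_SIZE:
        logger.info('Not writing extra large core file for %s.', oops_id)
        return REJECTED
    # ... the Swift put_object call would go here ...
    return True

def handle_result(written, message, metrics):
    """Sketch of the caller: meter only genuine storage failures."""
    if written is REJECTED:
        # Oversized core was rejected on purpose; not a storage error.
        return None
    if written:
        return message
    metrics.meter('storage_write_error')
    return None

Checking identity against a sentinel object rather than overloading False means the existing storage_write_error accounting stays accurate without the caller needing to re-derive the core file's size.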