diff -Nru dotnet8-8.0.100-8.0.0~rc2/build.proj dotnet8-8.0.100-8.0.0/build.proj
--- dotnet8-8.0.100-8.0.0~rc2/build.proj 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/build.proj 2023-11-13 13:20:34.000000000 +0000
@@ -4,6 +4,7 @@
+
@@ -57,6 +58,83 @@
+
+
+
+
+
+
+
+
+
+ $([System.IO.Path]::GetFileName('%(SymbolsTarball.Identity)'))
+ $(Filename.Split('.')[1])
+ $(ArtifactsTmpDir)Symbols
+ $(UnifiedSymbolsLayout)/$(RepositoryName)
+
+
+
+
+
+
+
+
+
+
+
+ $(OutputPath)dotnet-symbols-all-$(MicrosoftSourceBuildIntermediateInstallerVersion)-$(TargetRid).tar.gz
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(ArtifactsTmpDir)SdkSymbols
+ $(OutputPath)dotnet-symbols-sdk-$(MicrosoftSourceBuildIntermediateInstallerVersion)-$(TargetRid).tar.gz
+ $(ArtifactsTmpDir)Sdk
+ %(SdkTarballItem.Identity)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -45,50 +47,52 @@
+
+
-
-
- %(LinuxRid.Identity)
-
-
-
-
- %(LinuxRid.Identity)
-
+
+
+ %(UnixRid.Identity)
+
+
+
+
+ %(UnixRid.Identity)
+
-
- %(RuntimePackWithLinuxRid.Identity).%(RuntimePackWithLinuxRid.LinuxRid)
+
+ %(RuntimePackWithUnixRid.Identity).%(RuntimePackWithUnixRid.UnixRid)
-
- %(PortablePackageWithLinuxRid.Identity)
+
+ %(PortablePackageWithUnixRid.Identity)
-
- runtime.%(PortablePackageWithLinuxRid.LinuxRid).%(PortablePackageWithLinuxRid.Identity)
+
+ runtime.%(PortablePackageWithUnixRid.UnixRid).%(PortablePackageWithUnixRid.Identity)
-
- runtime.%(PortablePackageWithLinuxRid.LinuxRid).runtime.native.%(PortablePackageWithLinuxRid.Identity)
+
+ runtime.%(PortablePackageWithUnixRid.UnixRid).runtime.native.%(PortablePackageWithUnixRid.Identity)
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/bootstrap/OverrideBootstrapVersions.props dotnet8-8.0.100-8.0.0/eng/bootstrap/OverrideBootstrapVersions.props
--- dotnet8-8.0.100-8.0.0~rc2/eng/bootstrap/OverrideBootstrapVersions.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/bootstrap/OverrideBootstrapVersions.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,11 +1,23 @@
-
- 7.0.4-servicing.23107.6
+
+ 7.0.4-servicing.23107.6
+
+ $(NonshippingRuntimeVersionFor700)
- $(NonshippingRuntimeVersionFor700)
- $(NonshippingRuntimeVersionFor700)
- $(NonshippingRuntimeVersionFor700)
+ 8.0.0-rc.2.23479.6
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
+ $(Msft80RC2RuntimeVersion)
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/common/sdk-task.ps1 dotnet8-8.0.100-8.0.0/eng/common/sdk-task.ps1
--- dotnet8-8.0.100-8.0.0~rc2/eng/common/sdk-task.ps1 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/common/sdk-task.ps1 2023-11-13 13:20:34.000000000 +0000
@@ -64,7 +64,7 @@
$GlobalJson.tools | Add-Member -Name "vs" -Value (ConvertFrom-Json "{ `"version`": `"16.5`" }") -MemberType NoteProperty
}
if( -not ($GlobalJson.tools.PSObject.Properties.Name -match "xcopy-msbuild" )) {
- $GlobalJson.tools | Add-Member -Name "xcopy-msbuild" -Value "17.6.0-2" -MemberType NoteProperty
+ $GlobalJson.tools | Add-Member -Name "xcopy-msbuild" -Value "17.8.1-2" -MemberType NoteProperty
}
if ($GlobalJson.tools."xcopy-msbuild".Trim() -ine "none") {
$xcopyMSBuildToolsFolder = InitializeXCopyMSBuild $GlobalJson.tools."xcopy-msbuild" -install $true
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/common/tools.ps1 dotnet8-8.0.100-8.0.0/eng/common/tools.ps1
--- dotnet8-8.0.100-8.0.0~rc2/eng/common/tools.ps1 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/common/tools.ps1 2023-11-13 13:20:34.000000000 +0000
@@ -379,13 +379,13 @@
}
# Minimum VS version to require.
- $vsMinVersionReqdStr = '17.6'
+ $vsMinVersionReqdStr = '17.7'
$vsMinVersionReqd = [Version]::new($vsMinVersionReqdStr)
# If the version of msbuild is going to be xcopied,
# use this version. Version matches a package here:
- # https://dev.azure.com/dnceng/public/_artifacts/feed/dotnet-eng/NuGet/RoslynTools.MSBuild/versions/17.6.0-2
- $defaultXCopyMSBuildVersion = '17.6.0-2'
+ # https://dev.azure.com/dnceng/public/_artifacts/feed/dotnet-eng/NuGet/RoslynTools.MSBuild/versions/17.8.1-2
+ $defaultXCopyMSBuildVersion = '17.8.1-2'
if (!$vsRequirements) {
if (Get-Member -InputObject $GlobalJson.tools -Name 'vs') {
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/CheckForPoison.cs dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/CheckForPoison.cs
--- dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/CheckForPoison.cs 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/CheckForPoison.cs 2023-11-13 13:20:34.000000000 +0000
@@ -12,8 +12,11 @@
using System.IO.Compression;
using System.Linq;
using System.Reflection;
+using System.Reflection.Metadata;
+using System.Reflection.PortableExecutable;
using System.Security.Cryptography;
using System.Text;
+using System.Text.RegularExpressions;
using System.Xml;
using System.Xml.Linq;
@@ -30,6 +33,12 @@
public ITaskItem[] FilesToCheck { get; set; }
///
+ /// The path of the project directory to the FilesToCheck.
+ ///
+ [Required]
+ public string ProjectDirPath { get; set; }
+
+ ///
/// The output path for an XML poison report, if desired.
///
public string PoisonReportOutputFilePath { get; set; }
@@ -141,6 +150,10 @@
private const string PoisonMarker = "POISONED";
+ private const string SbrpAttributeType = "System.Reflection.AssemblyMetadataAttribute";
+
+ private record CandidateFileEntry(string ExtractedPath, string DisplayPath);
+
public override bool Execute()
{
IEnumerable poisons = GetPoisonedFiles(FilesToCheck.Select(f => f.ItemSpec), HashCatalogFilePath, MarkerFileName);
@@ -176,7 +189,9 @@
IEnumerable nonShippingPackages = GetAllNonShippingPackages();
IEnumerable catalogedPackages = ReadCatalog(catalogedPackagesFilePath);
var poisons = new List();
- var candidateQueue = new Queue(initialCandidates);
+ var candidateQueue = new Queue(initialCandidates.Select(candidate =>
+ new CandidateFileEntry(candidate, Utility.MakeRelativePath(candidate, ProjectDirPath))));
+
if (!string.IsNullOrWhiteSpace(OverrideTempPath))
{
Directory.CreateDirectory(OverrideTempPath);
@@ -186,22 +201,22 @@
while (candidateQueue.Any())
{
- var checking = candidateQueue.Dequeue();
+ var candidate = candidateQueue.Dequeue();
// if this is a zip or NuPkg, extract it, check for the poison marker, and
// add its contents to the list to be checked.
- if (ZipFileExtensions.Concat(TarFileExtensions).Concat(TarGzFileExtensions).Any(e => checking.ToLowerInvariant().EndsWith(e)))
+ if (ZipFileExtensions.Concat(TarFileExtensions).Concat(TarGzFileExtensions).Any(e => candidate.ExtractedPath.ToLowerInvariant().EndsWith(e)))
{
- Log.LogMessage($"Zip or NuPkg file to check: {checking}");
+ Log.LogMessage($"Zip or NuPkg file to check: {candidate.ExtractedPath}");
// Skip non-shipping packages
- if (nonShippingPackages.Contains(Path.GetFileName(checking), StringComparer.OrdinalIgnoreCase))
+ if (nonShippingPackages.Contains(Path.GetFileName(candidate.ExtractedPath), StringComparer.OrdinalIgnoreCase))
{
continue;
}
- var tempCheckingDir = Path.Combine(tempDir.FullName, Path.GetFileNameWithoutExtension(checking));
- PoisonedFileEntry result = ExtractAndCheckZipFileOnly(catalogedPackages, checking, markerFileName, tempCheckingDir, candidateQueue);
+ var tempCheckingDir = Path.Combine(tempDir.FullName, Path.GetFileNameWithoutExtension(candidate.ExtractedPath));
+ PoisonedFileEntry result = ExtractAndCheckZipFileOnly(catalogedPackages, candidate, markerFileName, tempCheckingDir, candidateQueue);
if (result != null)
{
poisons.Add(result);
@@ -209,7 +224,7 @@
}
else
{
- PoisonedFileEntry result = CheckSingleFile(catalogedPackages, tempDir.FullName, checking);
+ PoisonedFileEntry result = CheckSingleFile(catalogedPackages, candidate);
if (result != null)
{
poisons.Add(result);
@@ -237,10 +252,12 @@
}
}
- private static PoisonedFileEntry CheckSingleFile(IEnumerable catalogedPackages, string rootPath, string fileToCheck)
+ private static PoisonedFileEntry CheckSingleFile(IEnumerable catalogedPackages, CandidateFileEntry candidate)
{
// skip some common files that get copied verbatim from nupkgs - LICENSE, _._, etc as well as
// file types that we never care about - text files, .gitconfig, etc.
+ var fileToCheck = candidate.ExtractedPath;
+
if (FileNamesToSkip.Any(f => Path.GetFileName(fileToCheck).ToLowerInvariant() == f.ToLowerInvariant()) ||
FileExtensionsToSkip.Any(e => Path.GetExtension(fileToCheck).ToLowerInvariant() == e.ToLowerInvariant()) ||
(new FileInfo(fileToCheck).Length == 0))
@@ -249,7 +266,7 @@
}
var poisonEntry = new PoisonedFileEntry();
- poisonEntry.Path = Utility.MakeRelativePath(fileToCheck, rootPath);
+ poisonEntry.Path = candidate.DisplayPath;
// There seems to be some weird issues with using file streams both for hashing and assembly loading.
// Copy everything into a memory stream to avoid these problems.
@@ -286,7 +303,11 @@
try
{
AssemblyName asm = AssemblyName.GetAssemblyName(fileToCheck);
- if (IsAssemblyPoisoned(fileToCheck))
+ if (!candidate.DisplayPath.Contains("SourceBuildReferencePackages") && IsAssemblyFromSbrp(fileToCheck))
+ {
+ poisonEntry.Type |= PoisonType.SourceBuildReferenceAssembly;
+ }
+ else if (IsAssemblyPoisoned(fileToCheck))
{
poisonEntry.Type |= PoisonType.AssemblyAttribute;
}
@@ -320,9 +341,51 @@
return false;
}
- private static PoisonedFileEntry ExtractAndCheckZipFileOnly(IEnumerable catalogedPackages, string zipToCheck, string markerFileName, string tempDir, Queue futureFilesToCheck)
+ private static bool IsAssemblyFromSbrp(string assemblyPath)
+ {
+ using var stream = new FileStream(assemblyPath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);
+ using var peReader = new PEReader(stream);
+
+ MetadataReader reader = peReader.GetMetadataReader();
+ return reader.CustomAttributes.Select(attrHandle => reader.GetCustomAttribute(attrHandle))
+ .Any(attr => IsAttributeSbrp(reader, attr));
+ }
+
+ private static bool IsAttributeSbrp(MetadataReader reader, CustomAttribute attr)
+ {
+ string attributeType = string.Empty;
+
+ if (attr.Constructor.Kind == HandleKind.MemberReference)
+ {
+ MemberReference mref = reader.GetMemberReference((MemberReferenceHandle)attr.Constructor);
+
+ if (mref.Parent.Kind == HandleKind.TypeReference)
+ {
+ TypeReference tref = reader.GetTypeReference((TypeReferenceHandle)mref.Parent);
+ attributeType = $"{reader.GetString(tref.Namespace)}.{reader.GetString(tref.Name)}";
+ }
+ }
+
+ if (attributeType == SbrpAttributeType)
+ {
+ var decodedValue = attr.DecodeValue(DummyAttributeTypeProvider.Instance);
+ try
+ {
+ return decodedValue.FixedArguments[0].Value.ToString() == "source" && decodedValue.FixedArguments[1].Value.ToString() == "source-build-reference-packages";
+ }
+ catch
+ {
+ throw new InvalidOperationException($"{SbrpAttributeType} is not formatted properly with a key, value pair.");
+ }
+ }
+
+ return false;
+ }
+
+ private static PoisonedFileEntry ExtractAndCheckZipFileOnly(IEnumerable catalogedPackages, CandidateFileEntry candidate, string markerFileName, string tempDir, Queue futureFilesToCheck)
{
var poisonEntry = new PoisonedFileEntry();
+ var zipToCheck = candidate.ExtractedPath;
poisonEntry.Path = zipToCheck;
using (var sha = SHA256.Create())
@@ -375,8 +438,9 @@
foreach (var child in Directory.EnumerateFiles(tempDir, "*", SearchOption.AllDirectories))
{
- // also add anything in this zip/package for checking
- futureFilesToCheck.Enqueue(child);
+ string displayPath = $"{candidate.DisplayPath}/{child.Replace(tempDir, string.Empty).TrimStart(Path.DirectorySeparatorChar)}";
+
+ futureFilesToCheck.Enqueue(new CandidateFileEntry(child, displayPath));
}
return poisonEntry.Type != PoisonType.None ? poisonEntry : null;
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/DummyAttributeTypeProvider.cs dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/DummyAttributeTypeProvider.cs
--- dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/DummyAttributeTypeProvider.cs 1970-01-01 00:00:00.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/DummyAttributeTypeProvider.cs 2023-11-13 13:20:34.000000000 +0000
@@ -0,0 +1,34 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Reflection;
+using System.Reflection.Metadata;
+using System.Reflection.Metadata.Ecma335;
+
+namespace Microsoft.DotNet.SourceBuild.Tasks.LeakDetection
+{
+
+ // An empty ICustomAttributeTypeProvider implementation is necessary to read metadata attribute values.
+ internal class DummyAttributeTypeProvider : ICustomAttributeTypeProvider
+ {
+ public static readonly DummyAttributeTypeProvider Instance = new();
+
+ public Type GetPrimitiveType(PrimitiveTypeCode typeCode) => default(Type);
+
+ public Type GetSystemType() => default(Type);
+
+ public Type GetSZArrayType(Type elementType) => default(Type);
+
+ public Type GetTypeFromDefinition(MetadataReader reader, TypeDefinitionHandle handle, byte rawTypeKind) => default(Type);
+
+ public Type GetTypeFromReference(MetadataReader reader, TypeReferenceHandle handle, byte rawTypeKind) => default(Type);
+
+ public Type GetTypeFromSerializedName(string name) => default(Type);
+
+ public PrimitiveTypeCode GetUnderlyingEnumType(Type type) => default(PrimitiveTypeCode);
+
+ public bool IsSystemType(Type type) => default(bool);
+ }
+}
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/MarkAndCatalogPackages.cs dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/MarkAndCatalogPackages.cs
--- dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/MarkAndCatalogPackages.cs 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/MarkAndCatalogPackages.cs 2023-11-13 13:20:34.000000000 +0000
@@ -155,6 +155,7 @@
}
File.Delete(p.ItemSpec);
File.Move(poisonedPackagePath, p.ItemSpec);
+ Directory.Delete(packageTempPath, true);
}
}
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/PoisonType.cs dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/PoisonType.cs
--- dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/PoisonType.cs 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.LeakDetection/PoisonType.cs 2023-11-13 13:20:34.000000000 +0000
@@ -11,5 +11,6 @@
Hash = 1,
AssemblyAttribute = 2,
NupkgFile = 4,
+ SourceBuildReferenceAssembly = 8,
}
}
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.XPlat/CreateSdkSymbolsLayout.cs dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.XPlat/CreateSdkSymbolsLayout.cs
--- dotnet8-8.0.100-8.0.0~rc2/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.XPlat/CreateSdkSymbolsLayout.cs 1970-01-01 00:00:00.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/tools/tasks/Microsoft.DotNet.SourceBuild.Tasks.XPlat/CreateSdkSymbolsLayout.cs 2023-11-13 13:20:34.000000000 +0000
@@ -0,0 +1,163 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Reflection.Metadata;
+using System.Reflection.PortableExecutable;
+using Microsoft.Build.Framework;
+using Microsoft.Build.Utilities;
+
+namespace Microsoft.DotNet.Build.Tasks
+{
+ // Creates a symbols layout that matches the SDK layout
+ public class CreateSdkSymbolsLayout : Task
+ {
+ ///
+ /// Path to SDK layout.
+ ///
+ [Required]
+ public string SdkLayoutPath { get; set; }
+
+ ///
+ /// Path to all source-built symbols, flat or with folder hierarchy.
+ ///
+ [Required]
+ public string AllSymbolsPath { get; set; }
+
+ ///
+ /// Path to SDK symbols layout - will be created if it doesn't exist.
+ ///
+ [Required]
+ public string SdkSymbolsLayoutPath { get; set; }
+
+ ///
+ /// If true, fails the build if any PDBs are missing.
+ ///
+ public bool FailOnMissingPDBs { get; set; }
+
+ public override bool Execute()
+ {
+ IList filesWithoutPDBs = GenerateSymbolsLayout(IndexAllSymbols());
+ if (filesWithoutPDBs.Count > 0)
+ {
+ LogErrorOrWarning(FailOnMissingPDBs, $"Did not find PDBs for the following SDK files:");
+ foreach (string file in filesWithoutPDBs)
+ {
+ LogErrorOrWarning(FailOnMissingPDBs, file);
+ }
+ }
+
+ return !Log.HasLoggedErrors;
+ }
+
+ private void LogErrorOrWarning(bool isError, string message)
+ {
+ if (isError)
+ {
+ Log.LogError(message);
+ }
+ else
+ {
+ Log.LogWarning(message);
+ }
+ }
+
+ private IList GenerateSymbolsLayout(Hashtable allPdbGuids)
+ {
+ List filesWithoutPDBs = new List();
+
+ if (Directory.Exists(SdkSymbolsLayoutPath))
+ {
+ Directory.Delete(SdkSymbolsLayoutPath, true);
+ }
+
+ foreach (string file in Directory.GetFiles(SdkLayoutPath, "*", SearchOption.AllDirectories))
+ {
+ if (file.EndsWith(".dll", StringComparison.InvariantCultureIgnoreCase) &&
+ !file.EndsWith(".resources.dll", StringComparison.InvariantCultureIgnoreCase))
+ {
+ string guid = string.Empty;
+ using var pdbStream = File.OpenRead(file);
+ using var peReader = new PEReader(pdbStream);
+ try
+ {
+ // Check if pdb is embedded
+ if (peReader.ReadDebugDirectory().Any(entry => entry.Type == DebugDirectoryEntryType.EmbeddedPortablePdb))
+ {
+ continue;
+ }
+
+ var debugDirectory = peReader.ReadDebugDirectory().First(entry => entry.Type == DebugDirectoryEntryType.CodeView);
+ var codeViewData = peReader.ReadCodeViewDebugDirectoryData(debugDirectory);
+ guid = $"{codeViewData.Guid.ToString("N").Replace("-", string.Empty)}";
+ }
+ catch (Exception e) when (e is BadImageFormatException || e is InvalidOperationException)
+ {
+ // Ignore binaries without debug info
+ continue;
+ }
+
+ if (guid != string.Empty)
+ {
+ string debugId = GetDebugId(guid, file);
+ if (!allPdbGuids.ContainsKey(debugId))
+ {
+ filesWithoutPDBs.Add(file.Substring(SdkLayoutPath.Length + 1));
+ }
+ else
+ {
+ // Copy matching pdb to symbols path, preserving sdk binary's hierarchy
+ string sourcePath = (string)allPdbGuids[debugId]!;
+ string destinationPath =
+ file.Replace(SdkLayoutPath, SdkSymbolsLayoutPath)
+ .Replace(Path.GetFileName(file), Path.GetFileName(sourcePath));
+
+ Directory.CreateDirectory(Path.GetDirectoryName(destinationPath)!);
+ File.Copy(sourcePath, destinationPath, true);
+ }
+ }
+ }
+ }
+
+ return filesWithoutPDBs;
+ }
+
+ public Hashtable IndexAllSymbols()
+ {
+ Hashtable allPdbGuids = new Hashtable();
+
+ foreach (string file in Directory.GetFiles(AllSymbolsPath, "*.pdb", SearchOption.AllDirectories))
+ {
+ using var pdbFileStream = File.OpenRead(file);
+ var metadataProvider = MetadataReaderProvider.FromPortablePdbStream(pdbFileStream);
+ var metadataReader = metadataProvider.GetMetadataReader();
+ if (metadataReader.DebugMetadataHeader == null)
+ {
+ continue;
+ }
+
+ var id = new BlobContentId(metadataReader.DebugMetadataHeader.Id);
+ string guid = $"{id.Guid:N}";
+ string debugId = GetDebugId(guid, file);
+ if (!string.IsNullOrEmpty(guid) && !allPdbGuids.ContainsKey(debugId))
+ {
+ allPdbGuids.Add(debugId, file);
+ }
+ }
+
+ return allPdbGuids;
+ }
+
+ ///
+ /// Calculates a debug Id from debug guid and filename. We use this as a key
+ /// in PDB hashtable. Guid is not enough due to collisions in several PDBs.
+ ///
+ private string GetDebugId(string guid, string file) =>
+ $"{guid}.{Path.GetFileNameWithoutExtension(file)}".ToLower();
+ }
+}
diff -Nru dotnet8-8.0.100-8.0.0~rc2/eng/Versions.props dotnet8-8.0.100-8.0.0/eng/Versions.props
--- dotnet8-8.0.100-8.0.0~rc2/eng/Versions.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/eng/Versions.props 2023-11-13 13:20:34.000000000 +0000
@@ -30,7 +30,7 @@
These URLs can't be composed from their base URL and version as we read them from the
prep.sh and pipeline scripts, outside of MSBuild.
-->
- https://dotnetcli.azureedge.net/source-built-artifacts/assets/Private.SourceBuilt.Artifacts.8.0.100-rc.1.23455.1.centos.8-x64.tar.gz
- https://dotnetcli.azureedge.net/source-built-artifacts/sdks/dotnet-sdk-8.0.100-rc.1.23455.1-centos.8-x64.tar.gz
+ https://dotnetcli.azureedge.net/source-built-artifacts/assets/Private.SourceBuilt.Artifacts.8.0.100-rc.2.23502.1.centos.8-x64.tar.gz
+ https://dotnetcli.azureedge.net/source-built-artifacts/sdks/dotnet-sdk-8.0.100-rc.2.23502.1-centos.8-x64.tar.gz
diff -Nru dotnet8-8.0.100-8.0.0~rc2/global.json dotnet8-8.0.100-8.0.0/global.json
--- dotnet8-8.0.100-8.0.0~rc2/global.json 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/global.json 2023-11-13 13:20:34.000000000 +0000
@@ -1,6 +1,6 @@
{
"tools": {
- "dotnet": "8.0.100-rc.1.23455.8"
+ "dotnet": "8.0.100-rc.2.23502.2"
},
"msbuild-sdks": {
"Microsoft.Build.CentralPackageVersions": "2.0.1",
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/AllRepoVersions.props dotnet8-8.0.100-8.0.0/prereqs/git-info/AllRepoVersions.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/AllRepoVersions.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/AllRepoVersions.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,57 +1,59 @@
- 1d451c32dda2314c721adbf8829e1c0cd4e681ff
- 8.0.0-beta.23463.1
- f0cc6b11bd8f0826c63d75483578e868c8abe75e
- 8.0.0-rc.2.23480.2
- 89be445dd4936157533ad96bafb95f701430653a
- 0.11.4-alpha.23468.2
+ 39042b4048580366d35a7c1c4f4ce8fc0dbea4b4
+ 8.0.0-beta.23516.4
+ 4b00c57d7ccf9a4c7e2aef211ab6bd8af3ee2324
+ 8.0.0-preview.1.23551.7
+ 3f1acb59718cadf111a0a796681e3d3509bb3381
+ 8.0.0-rtm.23531.12
+ 45dd3a73dd5b64b010c4251303b3664bb30df029
+ 0.11.4-alpha.23509.202fe27cd6a9b001c8feb7938e6ef4b37997457590.1.4307015957c5c5f85f17c145e7fab4ece37ad6aafcded98.0.0-preview.6.23463.15ce78f66d89ea529e459abddb129ab36cb5bd9367.0.0-preview.23211.1
- 8e857c92bff556d919f5f904bd9c777aade4afba
- 8.0.0-rc.2.23473.3
- 0cf0f51b02d759e89f2acc09663493ab422548b0
- 8.0.447701
- 10f956e631a1efc0f7f5e49c626c494cd32b1f50
- 8.0.100-beta.23475.2
- 0abacfc2b649dfe89cb9bd91930afe95c10dec16
+ 2406616d0e3a31d80b326e27c156955bfa41c791
+ 8.0.0-rtm.23530.2
+ 2651752953c0d41c8c7b8d661cf2237151af33d0
+ 8.0.453106
+ f41fe153f68dd6b20cf4f91de9ea1e55fc09bb20
+ 8.0.100-beta.23523.6
+ 57efcf1350ab087efe97ced8199ab0b494636adf8.0.100
- 6cdef424154c976f04802b101e6be6292f8a8897
- 17.8.0-preview-23472-04
- 7fb5ed887352d2892797a365cfdd7bb8df029941
- 6.8.0-rc.117
- 2daeaaaed440a9c59b063b1578616850a0ccddd1
- 7.0.0-preview.23473.1
- 4a7701fd72094614897b33e4cb1d9640c221d862
- 3.11.0-beta1.23472.1
- bdd9c5ba66b00beebdc3516acc5e29b83efd89af
- 4.8.0-3.23471.11
- 0b25e38ad32a69cd83ae246104b32449203cc71c
- 8.0.0-rc.2.23475.17
- 67e671f384bee6937630b52b02cc78e69b27e280
- 8.0.100-rc.2.23480.5
- 6dbf3aaa0fc9664df86462f5c70b99800934fccd
- 8.0.0-alpha.1.23471.2
- d825c6693d4e26f63aaa93c3c1d057faa098e347
- 8.0.0-alpha.1.23469.1
- ea3bd92278af83bae656ad9747c11f5a345e5b4a
- 8.0.0-beta.23469.1
+ 195e7f5a3a8e51c37d83cd9e54cb99dc3fc69c22
+ 17.8.3-preview-23519-04
+ 0dd5a1ea536201af94725353e4bc711d7560b246
+ 6.8.0-rc.122
+ 94fc3bd6fb6c8611fd4495e350db0560f46ece19
+ 7.0.0-preview.23520.1
+ b4d9a1334d5189172977ba8fddd00bda70161e4a
+ 3.11.0-beta1.23525.2
+ f43cd10b737b6343956dee421cff8c50b602c788
+ 4.8.0-3.23524.11
+ 5535e31a712343a63f5d7d796cd874e563e5ac14
+ 8.0.0-rtm.23531.3
+ e9d13cbe7e8c1d52ce276a8655f52a87e1017c46
+ 8.0.100-rtm.23551.6
+ 3dc05150cf234f76f6936dcb2853d31a0da1f60e
+ 8.0.0-alpha.1.23518.1
+ b4fa7f2e1e65ef49881be2ab2df27624280a8c55
+ 8.0.0-alpha.1.23516.4
+ e2f4720f9e7411122675568b984606c405b3bb53
+ 8.0.0-beta.23510.22c8079e2e8e78c0cd11ac75a32014756136ecdb92.1.0-beta.23253.1
- 50dde258eee728e3ca730351dc6496639b55201a
- 8.0.100-rc.2.23479.16
+ 4085146587b833948a22587b36a108bcdb3f04a3
+ 8.0.100-rtm.23531.61e5f3603af2277910aad946736ee23283e7f3e161.1.0-rc.23410.2
- cf7d549fc0197abaabec19d61d2c20d7a7b089f8
- 17.8.0-release-23468-02
+ ae25c3b96fe433c60af70e3991ace49fcbf7e970
+ 17.8.0-release-23523-039a1c3e1b7f0c8763d4c96e593961a61a72679a7b7.0.0-preview.22423.2
- 194f32828726c3f1f63f79f3dc09b9e99c157b11
- 1.0.0-beta.23426.1
+ 73f0850939d96131c28cf6ea6ee5aacb4da0083a
+ 1.0.0-beta.23475.1
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/arcade.props dotnet8-8.0.100-8.0.0/prereqs/git-info/arcade.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/arcade.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/arcade.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 1d451c32dda2314c721adbf8829e1c0cd4e681ff
- 20230913.1
- 8.0.0-beta.23463.1
+ 39042b4048580366d35a7c1c4f4ce8fc0dbea4b4
+ 20231016.4
+ 8.0.0-beta.23516.4betafalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/aspire.props dotnet8-8.0.100-8.0.0/prereqs/git-info/aspire.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/aspire.props 1970-01-01 00:00:00.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/aspire.props 2023-11-13 13:20:34.000000000 +0000
@@ -0,0 +1,10 @@
+
+
+
+ 4b00c57d7ccf9a4c7e2aef211ab6bd8af3ee2324
+ 20231101.7
+ 8.0.0-preview.1.23551.7
+ preview.1
+ false
+
+
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/aspnetcore.props dotnet8-8.0.100-8.0.0/prereqs/git-info/aspnetcore.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/aspnetcore.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/aspnetcore.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,10 +1,10 @@
- f0cc6b11bd8f0826c63d75483578e868c8abe75e
- 20230930.2
- 8.0.0-rc.2.23480.2
- rc.2
+ 3f1acb59718cadf111a0a796681e3d3509bb3381
+ 20231031.12
+ 8.0.0-rtm.23531.12
+ rtmfalse
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/cecil.props dotnet8-8.0.100-8.0.0/prereqs/git-info/cecil.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/cecil.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/cecil.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 89be445dd4936157533ad96bafb95f701430653a
- 20230918.2
- 0.11.4-alpha.23468.2
+ 45dd3a73dd5b64b010c4251303b3664bb30df029
+ 20231009.2
+ 0.11.4-alpha.23509.2alphafalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/emsdk.props dotnet8-8.0.100-8.0.0/prereqs/git-info/emsdk.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/emsdk.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/emsdk.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,10 +1,10 @@
- 8e857c92bff556d919f5f904bd9c777aade4afba
- 20230923.3
- 8.0.0-rc.2.23473.3
- rc.2
+ 2406616d0e3a31d80b326e27c156955bfa41c791
+ 20231030.2
+ 8.0.0-rtm.23530.2
+ rtmfalse
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/format.props dotnet8-8.0.100-8.0.0/prereqs/git-info/format.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/format.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/format.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 0cf0f51b02d759e89f2acc09663493ab422548b0
- 20230930.1
- 8.0.447701
+ 2651752953c0d41c8c7b8d661cf2237151af33d0
+ 20231101.1
+ 8.0.453106true
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/fsharp.props dotnet8-8.0.100-8.0.0/prereqs/git-info/fsharp.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/fsharp.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/fsharp.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 10f956e631a1efc0f7f5e49c626c494cd32b1f50
- 20230925.2
- 8.0.100-beta.23475.2
+ f41fe153f68dd6b20cf4f91de9ea1e55fc09bb20
+ 20231023.6
+ 8.0.100-beta.23523.6betafalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/installer.props dotnet8-8.0.100-8.0.0/prereqs/git-info/installer.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/installer.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/installer.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,8 +1,8 @@
- 0abacfc2b649dfe89cb9bd91930afe95c10dec16
- 20231002.1
+ 57efcf1350ab087efe97ced8199ab0b494636adf
+ 20231101.18.0.100true
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/msbuild.props dotnet8-8.0.100-8.0.0/prereqs/git-info/msbuild.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/msbuild.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/msbuild.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 6cdef424154c976f04802b101e6be6292f8a8897
- 20230922.4
- 17.8.0-preview-23472-04
+ 195e7f5a3a8e51c37d83cd9e54cb99dc3fc69c22
+ 20231019.4
+ 17.8.3-preview-23519-04previewfalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/nuget-client.props dotnet8-8.0.100-8.0.0/prereqs/git-info/nuget-client.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/nuget-client.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/nuget-client.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 7fb5ed887352d2892797a365cfdd7bb8df029941
+ 0dd5a1ea536201af94725353e4bc711d7560b24620230927.1
- 6.8.0-rc.117
+ 6.8.0-rc.122rcfalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/razor.props dotnet8-8.0.100-8.0.0/prereqs/git-info/razor.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/razor.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/razor.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 2daeaaaed440a9c59b063b1578616850a0ccddd1
- 20230923.1
- 7.0.0-preview.23473.1
+ 94fc3bd6fb6c8611fd4495e350db0560f46ece19
+ 20231020.1
+ 7.0.0-preview.23520.1previewfalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/roslyn-analyzers.props dotnet8-8.0.100-8.0.0/prereqs/git-info/roslyn-analyzers.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/roslyn-analyzers.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/roslyn-analyzers.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 4a7701fd72094614897b33e4cb1d9640c221d862
- 20230922.1
- 3.11.0-beta1.23472.1
+ b4d9a1334d5189172977ba8fddd00bda70161e4a
+ 20231025.2
+ 3.11.0-beta1.23525.2beta1false
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/roslyn.props dotnet8-8.0.100-8.0.0/prereqs/git-info/roslyn.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/roslyn.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/roslyn.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- bdd9c5ba66b00beebdc3516acc5e29b83efd89af
- 20230921.11
- 4.8.0-3.23471.11
+ f43cd10b737b6343956dee421cff8c50b602c788
+ 20231024.11
+ 4.8.0-3.23524.113false
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/runtime.props dotnet8-8.0.100-8.0.0/prereqs/git-info/runtime.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/runtime.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/runtime.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,10 +1,10 @@
- 0b25e38ad32a69cd83ae246104b32449203cc71c
- 20230925.17
- 8.0.0-rc.2.23475.17
- rc.2
+ 5535e31a712343a63f5d7d796cd874e563e5ac14
+ 20231031.3
+ 8.0.0-rtm.23531.3
+ rtmfalse
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/sdk.props dotnet8-8.0.100-8.0.0/prereqs/git-info/sdk.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/sdk.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/sdk.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,10 +1,10 @@
- 67e671f384bee6937630b52b02cc78e69b27e280
- 20230930.5
- 8.0.100-rc.2.23480.5
- rc.2
+ e9d13cbe7e8c1d52ce276a8655f52a87e1017c46
+ 20231101.6
+ 8.0.100-rtm.23551.6
+ rtmfalse
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/source-build-externals.props dotnet8-8.0.100-8.0.0/prereqs/git-info/source-build-externals.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/source-build-externals.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/source-build-externals.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 6dbf3aaa0fc9664df86462f5c70b99800934fccd
- 20230921.2
- 8.0.0-alpha.1.23471.2
+ 3dc05150cf234f76f6936dcb2853d31a0da1f60e
+ 20231018.1
+ 8.0.0-alpha.1.23518.1alpha.1false
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/source-build-reference-packages.props dotnet8-8.0.100-8.0.0/prereqs/git-info/source-build-reference-packages.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/source-build-reference-packages.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/source-build-reference-packages.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- d825c6693d4e26f63aaa93c3c1d057faa098e347
- 20230919.1
- 8.0.0-alpha.1.23469.1
+ b4fa7f2e1e65ef49881be2ab2df27624280a8c55
+ 20231016.4
+ 8.0.0-alpha.1.23516.4alpha.1false
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/sourcelink.props dotnet8-8.0.100-8.0.0/prereqs/git-info/sourcelink.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/sourcelink.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/sourcelink.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- ea3bd92278af83bae656ad9747c11f5a345e5b4a
- 20230919.1
- 8.0.0-beta.23469.1
+ e2f4720f9e7411122675568b984606c405b3bb53
+ 20231010.2
+ 8.0.0-beta.23510.2betafalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/templating.props dotnet8-8.0.100-8.0.0/prereqs/git-info/templating.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/templating.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/templating.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,10 +1,10 @@
- 50dde258eee728e3ca730351dc6496639b55201a
- 20230929.16
- 8.0.100-rc.2.23479.16
- rc.2
+ 4085146587b833948a22587b36a108bcdb3f04a3
+ 20231031.6
+ 8.0.100-rtm.23531.6
+ rtmfalse
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/vstest.props dotnet8-8.0.100-8.0.0/prereqs/git-info/vstest.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/vstest.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/vstest.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- cf7d549fc0197abaabec19d61d2c20d7a7b089f8
- 20230918.2
- 17.8.0-release-23468-02
+ ae25c3b96fe433c60af70e3991ace49fcbf7e970
+ 20231023.3
+ 17.8.0-release-23523-03releasefalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/xliff-tasks.props dotnet8-8.0.100-8.0.0/prereqs/git-info/xliff-tasks.props
--- dotnet8-8.0.100-8.0.0~rc2/prereqs/git-info/xliff-tasks.props 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/prereqs/git-info/xliff-tasks.props 2023-11-13 13:20:34.000000000 +0000
@@ -1,9 +1,9 @@
- 194f32828726c3f1f63f79f3dc09b9e99c157b11
- 20230826.1
- 1.0.0-beta.23426.1
+ 73f0850939d96131c28cf6ea6ee5aacb4da0083a
+ 20230925.1
+ 1.0.0-beta.23475.1betafalse
diff -Nru dotnet8-8.0.100-8.0.0~rc2/README.md dotnet8-8.0.100-8.0.0/README.md
--- dotnet8-8.0.100-8.0.0~rc2/README.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/README.md 2023-11-13 13:20:34.000000000 +0000
@@ -144,6 +144,12 @@
In practice, this means that when calling the main build script, you need to provide additional arguments when building outside of a context of a git repository.
Alternatively, you can also provide a manifest file where this information can be read from. This file (`release.json`) can be found attached with the [dotnet/dotnet release](https://github.com/dotnet/dotnet/releases).
+### Synchronizing code into the VMR
+
+Sometimes you want to make a change in a repository and test that change in the VMR. You could of course make the change in the VMR directly (locally, as the VMR is read-only for now) but in case it's already available in your repository, you can synchronize it into the VMR (again locally).
+
+To do this, you can start a [dotnet/dotnet](https://github.com/dotnet/dotnet) Codespace. You will see instructions right when the Codespace starts. Alternatively, you can clone the repository locally and use the `[eng/vmr-sync.sh](../../eng/vmr-sync.sh)` script to do that. Please refer to the documentation in the script for more details.
+
## List of components
To enable full offline source-building of the VMR, we have no other choice than to synchronize all the necessary code into the VMR. This also includes any code referenced via git submodules. More details on why and how this is done can be found here:
@@ -154,15 +160,17 @@
- `src/arcade`
-*[dotnet/arcade@1d451c3](https://github.com/dotnet/arcade/commit/1d451c32dda2314c721adbf8829e1c0cd4e681ff)*
+*[dotnet/arcade@39042b4](https://github.com/dotnet/arcade/commit/39042b4048580366d35a7c1c4f4ce8fc0dbea4b4)*
+- `src/aspire`
+*[_git/dotnet-aspire@4b00c57](https://dev.azure.com/dnceng/internal/_git/dotnet-aspire/commit/4b00c57d7ccf9a4c7e2aef211ab6bd8af3ee2324)*
- `src/aspnetcore`
-*[_git/dotnet-aspnetcore@f0cc6b1](https://dev.azure.com/dnceng/internal/_git/dotnet-aspnetcore/commit/f0cc6b11bd8f0826c63d75483578e868c8abe75e)*
+*[_git/dotnet-aspnetcore@3f1acb5](https://dev.azure.com/dnceng/internal/_git/dotnet-aspnetcore/commit/3f1acb59718cadf111a0a796681e3d3509bb3381)*
- `src/aspnetcore/src/submodules/googletest`
*[google/googletest@7e33b6a](https://github.com/google/googletest/commit/7e33b6a1c497ced1e98fc60175aeb4678419281c)*
- `src/aspnetcore/src/submodules/MessagePack-CSharp`
*[aspnet/MessagePack-CSharp@ecc4e18](https://github.com/aspnet/MessagePack-CSharp/commit/ecc4e18ad7a0c7db51cd7e3d2997a291ed01444d)*
- `src/cecil`
-*[dotnet/cecil@89be445](https://github.com/dotnet/cecil/commit/89be445dd4936157533ad96bafb95f701430653a)*
+*[dotnet/cecil@45dd3a7](https://github.com/dotnet/cecil/commit/45dd3a73dd5b64b010c4251303b3664bb30df029)*
- `src/command-line-api`
*[dotnet/command-line-api@02fe27c](https://github.com/dotnet/command-line-api/commit/02fe27cd6a9b001c8feb7938e6ef4b3799745759)*
- `src/deployment-tools`
@@ -170,37 +178,37 @@
- `src/diagnostics`
*[dotnet/diagnostics@5ce78f6](https://github.com/dotnet/diagnostics/commit/5ce78f66d89ea529e459abddb129ab36cb5bd936)*
- `src/emsdk`
-*[dotnet/emsdk@8e857c9](https://github.com/dotnet/emsdk/commit/8e857c92bff556d919f5f904bd9c777aade4afba)*
+*[dotnet/emsdk@2406616](https://github.com/dotnet/emsdk/commit/2406616d0e3a31d80b326e27c156955bfa41c791)*
- `src/format`
-*[dotnet/format@0cf0f51](https://github.com/dotnet/format/commit/0cf0f51b02d759e89f2acc09663493ab422548b0)*
+*[_git/dotnet-format@2651752](https://dev.azure.com/dnceng/internal/_git/dotnet-format/commit/2651752953c0d41c8c7b8d661cf2237151af33d0)*
- `src/fsharp`
-*[dotnet/fsharp@10f956e](https://github.com/dotnet/fsharp/commit/10f956e631a1efc0f7f5e49c626c494cd32b1f50)*
+*[dotnet/fsharp@f41fe15](https://github.com/dotnet/fsharp/commit/f41fe153f68dd6b20cf4f91de9ea1e55fc09bb20)*
- `src/installer`
-*[dotnet/installer@0abacfc](https://github.com/dotnet/installer/commit/0abacfc2b649dfe89cb9bd91930afe95c10dec16)*
+*[dotnet/installer@57efcf1](https://github.com/dotnet/installer/commit/57efcf1350ab087efe97ced8199ab0b494636adf)*
- `src/msbuild`
-*[dotnet/msbuild@6cdef42](https://github.com/dotnet/msbuild/commit/6cdef424154c976f04802b101e6be6292f8a8897)*
+*[dotnet/msbuild@195e7f5](https://github.com/dotnet/msbuild/commit/195e7f5a3a8e51c37d83cd9e54cb99dc3fc69c22)*
- `src/nuget-client`
-*[nuget/nuget.client@7fb5ed8](https://github.com/nuget/nuget.client/commit/7fb5ed887352d2892797a365cfdd7bb8df029941)*
+*[nuget/nuget.client@0dd5a1e](https://github.com/nuget/nuget.client/commit/0dd5a1ea536201af94725353e4bc711d7560b246)*
- `src/nuget-client/submodules/NuGet.Build.Localization`
*[NuGet/NuGet.Build.Localization@f15db7b](https://github.com/NuGet/NuGet.Build.Localization/commit/f15db7b7c6f5affbea268632ef8333d2687c8031)*
- `src/razor`
-*[dotnet/razor@2daeaaa](https://github.com/dotnet/razor/commit/2daeaaaed440a9c59b063b1578616850a0ccddd1)*
+*[dotnet/razor@94fc3bd](https://github.com/dotnet/razor/commit/94fc3bd6fb6c8611fd4495e350db0560f46ece19)*
- `src/roslyn`
-*[dotnet/roslyn@bdd9c5b](https://github.com/dotnet/roslyn/commit/bdd9c5ba66b00beebdc3516acc5e29b83efd89af)*
+*[dotnet/roslyn@f43cd10](https://github.com/dotnet/roslyn/commit/f43cd10b737b6343956dee421cff8c50b602c788)*
- `src/roslyn-analyzers`
-*[dotnet/roslyn-analyzers@4a7701f](https://github.com/dotnet/roslyn-analyzers/commit/4a7701fd72094614897b33e4cb1d9640c221d862)*
+*[dotnet/roslyn-analyzers@b4d9a13](https://github.com/dotnet/roslyn-analyzers/commit/b4d9a1334d5189172977ba8fddd00bda70161e4a)*
- `src/runtime`
-*[_git/dotnet-runtime@0b25e38](https://dev.azure.com/dnceng/internal/_git/dotnet-runtime/commit/0b25e38ad32a69cd83ae246104b32449203cc71c)*
+*[_git/dotnet-runtime@5535e31](https://dev.azure.com/dnceng/internal/_git/dotnet-runtime/commit/5535e31a712343a63f5d7d796cd874e563e5ac14)*
- `src/sdk`
-*[_git/dotnet-sdk@67e671f](https://dev.azure.com/dnceng/internal/_git/dotnet-sdk/commit/67e671f384bee6937630b52b02cc78e69b27e280)*
+*[_git/dotnet-sdk@e9d13cb](https://dev.azure.com/dnceng/internal/_git/dotnet-sdk/commit/e9d13cbe7e8c1d52ce276a8655f52a87e1017c46)*
- `src/source-build-externals`
-*[dotnet/source-build-externals@6dbf3aa](https://github.com/dotnet/source-build-externals/commit/6dbf3aaa0fc9664df86462f5c70b99800934fccd)*
+*[dotnet/source-build-externals@3dc0515](https://github.com/dotnet/source-build-externals/commit/3dc05150cf234f76f6936dcb2853d31a0da1f60e)*
- `src/source-build-externals/src/abstractions-xunit`
*[xunit/abstractions.xunit@b75d54d](https://github.com/xunit/abstractions.xunit/commit/b75d54d73b141709f805c2001b16f3dd4d71539d)*
- `src/source-build-externals/src/application-insights`
*[Microsoft/ApplicationInsights-dotnet@5e2e7dd](https://github.com/Microsoft/ApplicationInsights-dotnet/commit/5e2e7ddda961ec0e16a75b1ae0a37f6a13c777f5)*
- `src/source-build-externals/src/azure-activedirectory-identitymodel-extensions-for-dotnet`
- *[AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet@bf4cb25](https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/commit/bf4cb251a85f1b27bbb208c703f6f3105bdb24ca)*
+ *[AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet@bb354ce](https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/commit/bb354ceabed19189245e075abb864f327b6c14ad)*
- `src/source-build-externals/src/cssparser`
*[dotnet/cssparser@0d59611](https://github.com/dotnet/cssparser/commit/0d59611784841735a7778a67aa6e9d8d000c861f)*
- `src/source-build-externals/src/docker-creds-provider`
@@ -220,21 +228,21 @@
- `src/source-build-externals/src/xunit/tools/media`
*[xunit/media@5738b6e](https://github.com/xunit/media/commit/5738b6e86f08e0389c4392b939c20e3eca2d9822)*
- `src/source-build-reference-packages`
-*[dotnet/source-build-reference-packages@d825c66](https://github.com/dotnet/source-build-reference-packages/commit/d825c6693d4e26f63aaa93c3c1d057faa098e347)*
+*[dotnet/source-build-reference-packages@b4fa7f2](https://github.com/dotnet/source-build-reference-packages/commit/b4fa7f2e1e65ef49881be2ab2df27624280a8c55)*
- `src/sourcelink`
-*[dotnet/sourcelink@ea3bd92](https://github.com/dotnet/sourcelink/commit/ea3bd92278af83bae656ad9747c11f5a345e5b4a)*
+*[dotnet/sourcelink@e2f4720](https://github.com/dotnet/sourcelink/commit/e2f4720f9e7411122675568b984606c405b3bb53)*
- `src/symreader`
*[dotnet/symreader@2c8079e](https://github.com/dotnet/symreader/commit/2c8079e2e8e78c0cd11ac75a32014756136ecdb9)*
- `src/templating`
-*[_git/dotnet-templating@50dde25](https://dev.azure.com/dnceng/internal/_git/dotnet-templating/commit/50dde258eee728e3ca730351dc6496639b55201a)*
+*[_git/dotnet-templating@4085146](https://dev.azure.com/dnceng/internal/_git/dotnet-templating/commit/4085146587b833948a22587b36a108bcdb3f04a3)*
- `src/test-templates`
*[dotnet/test-templates@1e5f360](https://github.com/dotnet/test-templates/commit/1e5f3603af2277910aad946736ee23283e7f3e16)*
- `src/vstest`
-*[microsoft/vstest@cf7d549](https://github.com/microsoft/vstest/commit/cf7d549fc0197abaabec19d61d2c20d7a7b089f8)*
+*[microsoft/vstest@ae25c3b](https://github.com/microsoft/vstest/commit/ae25c3b96fe433c60af70e3991ace49fcbf7e970)*
- `src/xdt`
*[dotnet/xdt@9a1c3e1](https://github.com/dotnet/xdt/commit/9a1c3e1b7f0c8763d4c96e593961a61a72679a7b)*
- `src/xliff-tasks`
-*[dotnet/xliff-tasks@194f328](https://github.com/dotnet/xliff-tasks/commit/194f32828726c3f1f63f79f3dc09b9e99c157b11)*
+*[dotnet/xliff-tasks@73f0850](https://github.com/dotnet/xliff-tasks/commit/73f0850939d96131c28cf6ea6ee5aacb4da0083a)*
The repository also contains a [JSON manifest](https://github.com/dotnet/dotnet/blob/main/src/source-manifest.json) listing all components in a machine-readable format.
diff -Nru dotnet8-8.0.100-8.0.0~rc2/release.info dotnet8-8.0.100-8.0.0/release.info
--- dotnet8-8.0.100-8.0.0~rc2/release.info 2023-10-18 18:08:36.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/release.info 2023-11-13 13:20:41.000000000 +0000
@@ -1,2 +1,2 @@
RM_GIT_REPO=https://github.com/dotnet/dotnet
-RM_GIT_COMMIT=1e872358329855089d8d14cec1f06d5b075824b5
+RM_GIT_COMMIT=40e7f014ff784457efffa58074549735e30772ae
diff -Nru dotnet8-8.0.100-8.0.0~rc2/repo-projects/aspire.proj dotnet8-8.0.100-8.0.0/repo-projects/aspire.proj
--- dotnet8-8.0.100-8.0.0~rc2/repo-projects/aspire.proj 1970-01-01 00:00:00.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/repo-projects/aspire.proj 2023-11-13 13:20:34.000000000 +0000
@@ -0,0 +1,13 @@
+
+
+
+
+ $(StandardSourceBuildCommand) $(StandardSourceBuildArgs)
+
+
+
+
+
+
+
+
\ No newline at end of file
diff -Nru dotnet8-8.0.100-8.0.0~rc2/repo-projects/dotnet.proj dotnet8-8.0.100-8.0.0/repo-projects/dotnet.proj
--- dotnet8-8.0.100-8.0.0~rc2/repo-projects/dotnet.proj 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/repo-projects/dotnet.proj 2023-11-13 13:20:34.000000000 +0000
@@ -45,6 +45,7 @@
+
diff -Nru dotnet8-8.0.100-8.0.0~rc2/repo-projects/installer.proj dotnet8-8.0.100-8.0.0/repo-projects/installer.proj
--- dotnet8-8.0.100-8.0.0~rc2/repo-projects/installer.proj 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/repo-projects/installer.proj 2023-11-13 13:20:34.000000000 +0000
@@ -45,12 +45,6 @@
-
-
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/repo-projects/sdk.proj dotnet8-8.0.100-8.0.0/repo-projects/sdk.proj
--- dotnet8-8.0.100-8.0.0~rc2/repo-projects/sdk.proj 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/repo-projects/sdk.proj 2023-11-13 13:20:34.000000000 +0000
@@ -23,15 +23,6 @@
-
-
-
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/azure-pipelines.yml dotnet8-8.0.100-8.0.0/src/arcade/azure-pipelines.yml
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/azure-pipelines.yml 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/azure-pipelines.yml 2023-11-13 13:20:34.000000000 +0000
@@ -7,6 +7,7 @@
- release/5.0
- release/6.0
- release/7.0
+ - release/8.0
paths:
include:
- '*'
@@ -30,6 +31,7 @@
- release/5.0
- release/6.0
- release/7.0
+ - release/8.0
- templates
paths:
include:
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/AzureDevOps/AzureDevOpsOnboarding.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/AzureDevOps/AzureDevOpsOnboarding.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/AzureDevOps/AzureDevOpsOnboarding.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/AzureDevOps/AzureDevOpsOnboarding.md 2023-11-13 13:20:34.000000000 +0000
@@ -54,7 +54,7 @@
### Git (internal) connections
-See the [dotnet-bot-github-service-endpoint documentation](https://github.com/dotnet/arcade/blob/main/Documentation/Project-Docs/VSTS/dotnet-bot-github-service-endpoint.md#dotnet-bot-github-service-endpoint)
+See the [dotnet-bot-github-service-endpoint documentation](https://github.com/dotnet/dnceng/blob/main/Documentation/ProjectDocs/VSTS/dotnet-bot-github-service-endpoint.md#dotnet-bot-github-service-endpoint)
## Agent queues
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/DevWorkflow/Queue-Insights-Documentation.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/DevWorkflow/Queue-Insights-Documentation.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/DevWorkflow/Queue-Insights-Documentation.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/DevWorkflow/Queue-Insights-Documentation.md 2023-11-13 13:20:34.000000000 +0000
@@ -26,7 +26,7 @@
You should interpret this data as this pipeline should finish around the *expected* time but may finish as quick as the *lower* time or take as long as the *higher* time.
-This data is calculated from our build telemetry, and you may visit our [one-pager](../TeamProcess/One-Pagers/pipeline-machine-learning-arcade8824.md) for how it works.
+This data is calculated from our build telemetry, and you may visit our [one-pager](https://github.com/dotnet/dnceng/blob/main/Documentation/TeamProcess/One-Pagers/pipeline-machine-learning-arcade8824.md) for how it works.
### Multi-modal distributions
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Operations/dnceng-operational-responsibilities.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Operations/dnceng-operational-responsibilities.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Operations/dnceng-operational-responsibilities.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Operations/dnceng-operational-responsibilities.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,177 +0,0 @@
-# .NET Core Engineering DevOps V-Team Responsibilities (Tentative short name : "DncDevOps")
-
-Note that TKapin and IlyaS have already spent some time thinking about this problem. That document is [here](https://microsofteur-my.sharepoint.com/:w:/g/personal/tokapin_microsoft_com/EWK52KdVvIZCsfLqe6idj6QBYzML12mx82xsmwtGt6H-Ug?e=PiAiUU)
-
-The above doc defines developer operations work as "Long lived, predictable, repetitive, and possibly highly manual work that needs to be performed periodically to keep providing healthy and secure services to our customers. For the physical world analogy, one could think of operational work as “turning the crank” of a complex machine to keep it operating."
-
-## "North Star" statement
-
-The .NET Core Engineering DevOps (DncDevOps) team strives to use its resources to make daily, repetitive operations undertaken by the team knowable, repeatable, automated (where possible), investigate-able, visible and secure. In doing so it will provide simple means for team members to understand existing and onboard new processes to this ownership. Processes and documentation should strive to be sufficient for vendors or new team members to be able to succeed at executing them.
-
-### Key principles
-
-- 'Manual' isn't a dirty word. Document and get running first, automate later.
-- Strive for simple, procedural guidance for processes.
-- Anything developed by this team needs to be written down, validated by someone else than the author, and findable by all
-- Don't lose issues that don't meet the below bar or FR's; keep customer needs moving until closed or pathed somewhere.
-- When we ask for a document, we have a template for it and the template _says where it should be put_.
-
-## Bar for activities owned by DncDevOps
-
-While exceptions will be made on a case-by-case basis, the following rubric should be applied to incoming requests determining whether it's appropriate to be handled by the DncDevOps team. Note that many of the tasks that will be handled eventually by DncDevOps are handled by the first responder team today.
-
-*Is the request for work in an established area?* DncDevOps should not be owning tasks or processes under their first round (or two, depending on complexity) of development; owned areas should be as close as possible to feature- and documentation-complete, and ready for daily usage. By definition, functionality already used in a .NET Core release shall be considered to be established. If this is not the case, work should be handled by the feature's active v-team, or possibly the First Responders (FR) team.
-
-*Is the work in an area under active development?* The DncDevOps team will work with epic teams to ensure that sustaining operations tasks are documented and reviewed before epic signoff, but in general while these areas are being developed it is preferable for that team to handle day-to-day tasks.
-
-*Is the work addressing immediate customer pain?* Regardless of how manual they may be, short-term customer pain issues should be handled by the First Responders team. Examples include creating a custom branch in a repository, adding a new service connection to an Azure DevOps project, or a one-off investigation of a particular error being faced by the customer. DncDevOps tasks include internal-only issues that have no short-term customer impact. They should represent tasks that repeat on at least weekly or biweekly cadence, such as (using the above "no" list as examples) monitoring the active branches used to ship a repo, keeping track of the service connections we create (why they exist, when they expire, who uses them), fixing issues found in FR investigations, or monitoring ongoing builds by a group of repositories to keep them healthy.
-
-*Is the task pertinent to the daily functioning of the .NET Core Engineering team or a team it supports?* One should be to make a clear, concise value statement about a DncDevOps-owned area's usefulness related to daily work by dnceng or a team it supports, whether it be for reliability, security, or general hygiene purposes.
-
-If you can reasonably answer "yes" to this list of questions, you're likely looking at something that should be housed in the "operations" epic. There will be quite a bit of overlap between DncDevOps and First-Responders, especially in the beginning, as these teams are meant to function as a complementary pair. Things which are not feature work but aren't devops either include FR work that addresses immediate customer pain, build failure investigations, guiding users on Teams, and really most one-off work items
-
-## What is the process by which we onboard DncDevOps tasks?
-
-If you've read the above bar and have something you'd like to onboard as a operational responsibility (or make changes to the existing processes), please follow the below process:
-
-- Create an issue in the https://github.com/dotnet/core-eng repository with the 'Proposed-for-DncDevOps' label.
-
-Suggested issue template:
-
-```
-
-- [ ] This issue is relevant to daily .NET Core Engineering tasks
-- [ ] This issue describes ownership of an ongoing responsibility pertinent to the .NET Core Engineering Services team
-
-Related issue(s) / documentation:
-
-
-
-- Requirements / estimated daily cost of ownership:
-
-
-- (Known) automation debt to pay:
-
-
-- Current state:
-
-
-- Benefit of DncDevOps owning this:
-
-
-Proposed DncDevOps ownership:
-```
-
-- At least once a week, tech leads of the FR and operations teams will review all issues with this tag. If accepted, the issue will be either converted to an epic or directly added as a child of the "DncDevOps work" epic in its backlog, sorted by priority order at the discretion of these teams.
-
-- If rejected for DncDevOps, we can have the conversation about sending the issue to FR or general triage process. This does not mean the issues just go to a dumping epic; issues with real consistent pain, customer or internal, should not just 'rot'. Unfortunately sometimes this will necessitate the user who wants this addressed to continue advocating for it.
-- The goal is that automation will be added/found for as much of the process as reasonably possible, always striving for reuse / addition to existing systems.
-
-- For accepted tasks:
-
- - Work will be placed into the ZenHub backlog for the [Operations epic](https://github.com/dotnet/core-eng/issues/14471)
- - Using the terminology of the above linked document, at a high level the next steps are:
- - Phase 1: Gathering requirements (ideally this stage is already done, performed by whoever is authoring the issue, but the operations team will need to evaluate this and agree with it). This may be part of the acceptance process, since the requirements directly influence cost.
- - Phase 2: Designing the routine. By the end of this stage, documentation of process should be under https://github.com/dotnet/core-eng/tree/main/Documentation/process/{area-name}.
-
- - Documentation in this folder should include at least a "operations-info.md" based off [process-info-template.md](./process-info-template.md) containing:
- - Frequency of operations performed, with start/end dates as applicable
- - Details and links to any source code /
- - Step-by-step instructions which should be signed off by one not-the-author developer with subject matter expertise and one individual who has to own/execute the task.
- - Troubleshooting section - case studies of previous issues
- - Escalation ICs - SMEs who can be contacted if the troubleshooting and instructions are insufficient. If these people are contacted, there must be an update to the troubleshooting section even if it's just clarification of terminology.
-
- - Routines will be made up of pieces which fall into three general categories.
- - Fully automatable routines : For these, the work is just like any other epic, and the ongoing cost is simply to ensure that the automation is still running and react to alerts. Processes added this way still need to have an entry under the wiki page named below.
- - Non-automatable, simple ("highly procedural") routines: Things we can't or won't automate, but can be described by a simple flow chart and troubleshooted via any search engine, at least a majority of the time.
- - Non-automatable, complex routines: Same as above but which will require significant work, consultation with subject matter experts, proof-of-concept development, etc.
- - Phase 3: Tooling and telemetry implementation - Writing the automation we think we need to support the work
- - Phase 4: Validation: I've left out the "rollout" phase here because this kind of process is happening whether we are methodical about it or not, whatever we do. If the task fails validation, at any time, it should be reevaluated via a new issue in the operations epic tracking the work. In general this process is something like:
- - Executor of this task reads and clarifies documentation added in Phase 2 with team members before starting
- - Executor then goes through some number of iterations of the process (number at their discretion), taking notes every time they have to seek help and what the answers they got seeking such help were.
- - Operations team reviews the executor's notes as available and makes iterative improvements to documentation / automation.
- - Executor makes the final "this is ready" call; this is not to say that there won't be more clarification / addition in the future.
-
- - If vendors are to be used, a "statement of work" needs to be drafted (JasoWard on DDFUN has expertise) to ensure the most productive use of vendor resources. Process documentation needs to indicate clearly what success and failure indicators for this task are critical in making this process work.
- - Once deemed 'ready' to operate (generally entering Phase 4 above), ownership, a brief entry stating purpose, and links to relevant work items and process documentation will be listed under https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/107/DncEng-DncDevOps-Ownership
-
-- At a certain point, determined primarily by the personnel funding of the DncDevOps / FR teams, taking on new ownership will necessarily entail either simplifying (further automating / reducing scope), deprioritizing, or removing older tasks, as these tasks are expected to have a perpetual ongoing cost.
-
-## Examples
-
-The following are some sample tasks that the DncDevOps team might own on a continuing basis, with the above rubric used to justify why these would be done. At the moment these ideas mostly represent brainstorming but are listed in descending priority order.
-
-### Ensure all pipelines owned by the DncEng infrastructure team are kept in a passing, "green" state, with a playbook for taking action when broken.
-
- - **Established area?** Yes, (assuming we manage the list / dashboard for this correctly)
- - **Area under active development?** Mostly no: Current epics will break the builds somewhat but that's indirect.
- - **Repeating work?** Yes, it never ends.
- - **Pertinent to the daily functioning?** Yes; broken builds mean lost productivity.
-
-### Enforce our team's OS and artifact versioning / lifetime
-
-Track and drive patching (both operating systems and artifacts installed thereon), end-of-life-ing dead OSes, and updating physical machines used by the team. Keep base images used by Helix clients updated on https://github.com/dotnet/dotnet-buildtools-prereqs-docker, delete end-of-life OSes from this repo, and respond to non-customer requirements for these (security updates, moving to other repos, adding new images). Communicate at least two weeks in advance (via dncpartners dl) end-of-life for Helix queues and fully remove them from support.
-
- - **Established area?** Yes, dotnet-helix-machines is quite old
- - **Area under active development?** No epics occurring in machine management.
- - **Repeating work?** Yes, it never ends.
- - **Pertinent to the daily functioning?** Yes, very much. Keeping the images we use up-to-date improves security, testing (since we want to catch issues before the users), and establishing a process for this lets us uptake new functionality and versions needed by partner teams proactively.
-
-### Manage DncEng-owned service connections in a documented fashion (and possibly variable groups)
-
- - **Established area?** Yes or N/A: we've used them a long time but they're just an artifact we depend on.
- - **Area under active development?** No, they are a piece of how our inter-connected AzDO / GitHub projects function.
- - **Repeating work?** Since the PATs backing them always expire and require logging in as dn-bot to cycle, it never ends.
- - **Pertinent to the daily functioning?** Yes; broken connection means broken builds means lost productivity.
-
-### Regularly find and delete vestigial objects in Azure (old image galleries, defunct services, old forgotten repro VMs, etc). Automate reporting of this.
-
- - **Established area?** Yes: represents all the existing deployed production work we do in Azure
- - **Area under active development?** Typically not; this represents work that is deployed to production.
- - **Repeating work?** Yes; users may add random new objects to Azure at any time, leading to insecurity or spending waste.
- - **Pertinent to the daily functioning?** Yes, it is possible for these objects to consume limited Azure resources we expect to be reserved for what we expect to be there. Further, the objects may represent a security concern if we don't know why they're there, so knowing of their existence is very helpful.
-
-### Some general ideas for tasks outside of this but which would support operational quality:
-- Document investigation processes via core-eng wiki or elsewhere for vendor execution in non-exceptional circumstances.
-- Make templates and help communicate important changes in services provided by the DncEng team
-
-## Process / Rules for DncDevOps team
-
-The DncDevOps team will be focused on 'predictable work' - stuff that doesn't have an SLA for hours. Since the work will be largely done by the First Responders team in the beginning, the hours would likely follow that with some coordination with the Prague team when special requirements make it preferable.
-
-### Primary goals
-
-1) Inventory, monitor, and observe key pieces of .NET Core Engineering services infrastructure to keep the system in a secure, usable state
-2) Communicate important changes to customers with a consistent format and pattern via email and Teams
-3) Triage issues proposed to be operational using the bar above, and coordinate with the team for rejected issues outside this (goal being to not let issues "rot" whatever they are)
-4) Document and offload as much repetitive / manual work as can be to vendor ICs, freeing up time for investigation and improvement.
-5) Regularly engage with (and redirect where needed) customers who not seeking short-term redress to hear out their challenges / issues and represent this. Maybe hold office hours?
-
-### Organization
-
-- Unknown - this wil depend on the scope of tasks owned. Since the tasks are much more of the "continuously ongoing" type though versus responding to incoming customer pain, it makes sense to not cycle through ICs every week. I believe 1 or 2 FTE ICs and some number of vendors would likely serve the cause well.
-
-### Synchronization and Hand Off
-
-- As the First-responders team (FR) will be intimately involved with the DncDevOps team, it makes sense to unify the meeting used for both purposes. If this not acceptable by the FR lead, a regular morning-time (to allow Prague to participate) 15 minute daily Teams standup would be held.
-- Whenever feasible, high-priority customer-facing work should be handed off to the First Responders team as this would be outside the DncDevOps charter.
-
-## Filing Issues
-
-- DncDevOps issues need to be created in the dotnet/core-eng repository. The usual process of filing an issue with only a link to a devdiv.vs Azure DevOps TFS work item would be followed for any issues considered "security sensitive".
-- Use necessary tracking system for vendor work, which may be IcM or other non-GitHub system.
-- Issues should be tagged with the "DncOp" label and added to the DncDevOps ongoing epic.
-
-- Guidance for issue creation
- - Description of the task or bug
- - Where in the code base (which repo / project/ etc) the work should be done, if known.
- - Link to instructional documentation if a vendor task.
-
-## Notes
-
-This is a draft: This list is currently a set of ideas, with no specific order. Primarily, I am trying to write down things that keep the system going that either don't have a clear ownership today, or for which ownership could be transferred.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5COperations%5Cdnceng-operational-responsibilities.md)](https://helix.dot.net/f/p/5?p=Documentation%5COperations%5Cdnceng-operational-responsibilities.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5COperations%5Cdnceng-operational-responsibilities.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Operations/Helix-Machine-Management/Helix-Machine-Lifecycle-Processes.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Operations/Helix-Machine-Management/Helix-Machine-Lifecycle-Processes.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Operations/Helix-Machine-Management/Helix-Machine-Lifecycle-Processes.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Operations/Helix-Machine-Management/Helix-Machine-Lifecycle-Processes.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,281 +0,0 @@
-# .NET Core Engineering Services Operations info: - Lifecycle management for Helix Queues and .NET Core Engineering Build Images
-
-## Process Details
-
-### Glossary of terms:
-
-- **End of life (EOL)**: The date at which an operating system is no longer supported by its publisher. Publishers will often provide more than one timeline for this (often charging customers money for longer terms). For .NET Core's purposes, this is usually the longest possible period where security vulnerabilities will be addressed as this is what customers running .NET on these operating systems expect, as we will need to be able to continue testing the shipped product on these operating systems until the final days of their support lifecycle.
-
-- **Estimated Removal Date (ERD)**: The date that .NET Core Engineering Services intends to remove a test queue or build image.
-This date is meant to force conversations to be had and actions to be taken, and is not meant to indicate a customer promise of .NET Core's OS support. The date may be before, or after the EOL date (preferably before) at DncEng's discretion. With the exception of the dotnet-helix-machines build where it can become an error once elapsed, this time is only used to inform warnings to users. Estimated removal dates thus can be arbitrarily extended with sufficient cause (leaving history behind in Git commits of who did it and hopefully why), but the goal is to never have unsupported and un-patch-able operating systems managed by the team.
-
-- **Update Required Date (URD)**: The date that .NET Core Engineering Services will next need to take action to update an image used in Helix test machines or 1ES hosted build pool images. This can be any date up to the estimated removal date, but not after it. This date is designed to allow the .NET Core Engineering Services team an opportunity to update images that need no user action (for instance: Updating to a newer, non-EOL, OS version with all the same artifacts where the users don't need to have direct communication about this.) When an existing image includes version information that makes this impossible (e.g. "19H1" is in a queue name but its corresponding Windows version is EOL) the Update Required date should be set to the same value as EstimatedRemovalDate.
-
-- **Matrix of Truth**: ([Epic issue link](https://github.com/dotnet/core-eng/issues/11077)) Ongoing work to provide a single source of information for operating system test matrix and life cycle for .NET Core Engineering systems.
-
-- **Helix Queue**: Set of machines, whether they are an Azure VM Scaleset or physical machines, which execute test work items.
-
-- **Build image**: Azure Compute Gallery image used to populate 1ES Pool provider instances used for all .NET Core builds. Due to executive order, we must perform all builds through images pushed to this system.
-
-### Summary:
-
-#### Why does this "process" exist in the first place?
-
-Previously, removing old Helix queues and images was a best-effort process. This has several major problems including:
-
-- It causes us to continue using images that no longer receive security patching, leading to potential attacks as well as definitely causing monitoring applications to detect these machines and cause us to react to this.
-- Helix VM and on-prem machine capacity is divided amongst whatever capacity we have. If we keep around old Helix queues (on-prem or in Azure), this limits the ability to provide this capacity in still-supported OSes.
-- Users would frequently only find out on rollout that their queue/image had been removed.
-
-#### Who benefits?
-Helix users benefit from secure, patched machines and a regular communication of when what they use will be deprecated. The .NET Core product teams get the most secure and accurate-to-real-users'-machines images (that is, containing all the recent patches and updates to operating systems and components which could affect test / product behaviors) possible to run their tests or builds on.
-
-#### What happens if it doesn't happen?
-Without a regular process, we will be bogged down with responding to alerts for VMs or other resources using 'old' images, and even ignoring any security-related implications (reasonable for most Helix test machines, less so for build images), we will continue to pay for storage and compute costs for no-longer-supported operating systems.
-
-### Process Boundaries
-
-- Related repositories:
- - https://dnceng.visualstudio.com/internal/_git/dotnet-helix-machines --> Source control for all Helix VMs, 1ES Pool provider images, and Helix on-premises machine setup functionality
- - https://github.com/dotnet/dotnet-buildtools-prereqs-docker --> Source control for a wide variety of docker images automatedly published to Microsoft Container Registry (MCR). Images used for testing Helix "docker" images come from here. Work will eventually be done to move most of this image generation into the dotnet-helix-machines repo.
-- Task scope:
-
- - In scope:
- - Monitor [dotnet-helix-machines](https://dnceng.visualstudio.com/internal/_build?definitionId=596) pipeline and respond to warnings / errors produced by EstimatedRemovalDate and UpdateRequiredDate times elapsing by either updating the images referenced and/or extending these dates, or removing these queues.
- - Whenever extending a date further out than the lifespan of an existing operating system, comments should be added above this (or at least in commit messages) explaining the extension.
- - Coordinate with rollout owners to ensure that upcoming removals and image refreshes (any queue that has produced a warning in the dotnet-helix-machines "Validate" stage) are included in rollout status emails.
- - On a monthly cadence, review and update the EstimatedRemovalDate / UpdateRequiredDate values of images generated by the dotnet-helix-machines repository for accuracy. Use .NET release PM team and the internet for deciding dates.
-
- - Not in scope:
- - Deciding the OSes for which we will provide images. (tracked by https://github.com/dotnet/arcade/issues/8832)
- - Patching of Operating systems or updating of artifacts (tracked by https://github.com/dotnet/arcade/issues/8813)
-
-- Contacts for non-owned parts of the process: For external ownership, who can we talk to?
- - For end-of-life operating systems, the release PM team owns the final word of when we can actually get rid of support. Certain operating systems can and will be maintained outside their publicly-communicated lifespan, usually owing to some important customer need. The release PM team is composed of Jamshed Damkewala (jamshedd), Rich Lander (rlander), Lee Coward (leecow), and Rahul Bhandari (rbhanda); contact them for any questions in this space. Some examples of operating systems we support beyond their normal end-of-life include Ubuntu 16.04 and Windows 7 / Server 2k8R2, but there will be more exceptions.
- - DncEng "matrix of truth": IlyaS, general SME : MattGal
-
-### Process Inputs / Outputs
-
-#### SME information:
-
-Descriptions of what/where the inputs to the process come from (the answer to "what do I or the automated process need to consider to perform this task?"), and what performing the below steps correctly achieves ("what comes out the other side?")
-
-Inputs:
-- The OS "Matrix of Truth" (future output from https://github.com/dotnet/arcade/issues/8832). Until this matrix exists, what we have is considered "in matrix" but we need to be removing unsupported OSes.
-- The .NET Core release team's input (they receive notifications for OS removals from build/test support via the partners DL, and can veto this with justification)
-- EstimatedRemovalDate warnings / errors from the dotnet-helix-machines-ci pipeline
-
-Outputs:
-- (Where needed) Removal of Helix image definition from dotnet-helix-machines repository.
-- (Where needed) Updates of Helix base image or image references in the dotnet-helix-machines repository, or tracking issues for this work
-- Communication with the release PM team ensuring any "first time" removals of a given OS are acceptable; this team may veto this and will provide new estimated removal dates if they do
- - Once the "Matrix of Truth" epic is complete and this is approved by the release PM team, we may consider no longer notifying them
- - If not removing all instances of an OS at once (e.g. if removing build images while leaving test queues), mark the remaining instances of the OS with a comment indicating removal has already been approved so this step may be skipped
-- Communication blurb following the below template in weekly rollout email. As these warnings are designed to appear in the dotnet-helix-machines official build starting 3 weeks prior to expiration, it should consistently allow the current week's rollout (or "not rolling out" mail) to indicate that users will soon need to take action.
-
-#### Example Communication
-
-The following Helix Queues and/or Build images will be removed on the Wednesday rollout following the estimated date. Please remove usage of these queues/images before this date to keep your pipelines and tests functional.
-
-Helix Queues:
-
-| Queue Name | Estimated removal date |
-| - | - |
-| Some.Helix.Queue | 03/14/2022 |
-| Some.Other.Helix.Queue | 03/10/2022 |
-
-1ES Hosted Pool Images
-
-| Image Name | Estimated removal date |
-| - | - |
-| Build.OperatingSystem.KindOfImage | 03/21/2022 |
-| Build.DifferentOperatingSystem.KindOfImage | 03/22/2022 |
-
-Removing no-longer-supported operating systems on a regular cadence both allows us to be as secure as possible and use more of the resources we have available more for still-supported platforms.
-If you feel this removal is in error, or believe a specific expiration should be extended, please email dnceng@microsoft.com with your concerns.
-
-### Execution Steps
-
-#### Fully-automatable routines:
-- EstimatedRemovalDate & UpdateRequiredDate notifications - Part of the dotnet-helix-machines-ci pipeline, these must be both in the future and estimated removal date be >= update required date or the build will fail.
-- Links:
- - Documentation: https://dnceng.visualstudio.com/internal/_git/dotnet-helix-machines?path=/definitions/readme.md
- - Pipeline: https://dnceng.visualstudio.com/internal/_build?definitionId=596
- - ImageFactory Wiki (includes access instructions): https://dev.azure.com/devdiv/XlabImageFactory/_wiki/wikis/XlabImageFactory.wiki
- - .NET PM team's OS Version Management calendar: https://dev.azure.com/devdiv/DevDiv/_wiki/wikis/DevDiv.wiki/12624/OS-Version-Management-Calendar-2022 (Adjust year for the current year)
-
-- Known issues impacting the area: None
-- Known tech debt: [Matrix of Truth](https://github.com/dotnet/arcade/issues/8832) work is not complete yet; once this is done we need to make sure this integrates into the existing system for update/removal.
-
-#### Manual processes:
-
-##### Daily:
-
-- Review [the day's main pipeline executions](https://dnceng.visualstudio.com/internal/_build?definitionId=596&_a=summary&repositoryFilter=3&branchFilter=152037)
-- If any warnings about EOL queues arise (see guidance for specific types below):
- - Check whether this removal represents the "last" of this operating system (no other queues have this OS). If so, get confirmation from the Release PM team or confirmation from “Matrix of Truth” to ensure its removal is acceptable and mention this in issues.
- - If removal is deemed inappropriate, make pull requests to the dotnet-helix-machines repo extending the time to a new, agreed-upon, date.
- - If pull requests are created, monitor subsequent builds in the pipeline until it has succeeded;
- - Open issues in the dotnet/arcade for all actions, with the "First Responder" tag. Add to list of queues for end-of-week update.
- - Create pull requests removing these test or build images after the current week's rollout in the week they will be removed, and follow these until merged.
-
-- If any warnings about "Update-required" images arise: (e.g. "`##[warning] has update-required date: YYYY-MM-DD, in the next three weeks. Please either update the image to newer, file an issue requesting this, or extend the date with a comment explaining why if no action is taken.`")
- - Check whether updated images exist:
- - Refer to the yaml for these images, found under the "[definitions](https://dnceng.visualstudio.com/internal/_git/dotnet-helix-machines?path=/definitions)" folder of [the dotnet-helix-machines repo](https://dnceng.visualstudio.com/internal/_git/dotnet-helix-machines). Some windows images may be found in [definition-base\windows.base.yaml](https://dnceng.visualstudio.com/internal/_git/dotnet-helix-machines?path=/definition-base/windows.base.yaml)
- - Find the image referenced in the yaml, or directly inside definitions\shared in the dotnet-helix-machines repository.
- - Run the osob CLI. The following commands assume you have .NET Core runtime on the path and are inside the `tools\OsobCli` folder of this repository.
- - `dotnet run list-image-versions -l westus -d ..\..\definitions`
- - `dotnet run list-imagefactory-image-versions -d ..\..\definitions`
- - Images with newer versions available will look like:
-```
-For :
- Current version:
- Latest available version:
- ** Upgrade! **
-```
- - If there are updated versions of the image:
- - Wherever "Current Version" and "Latest Version" do not match, modify the image version to match the version printed out by the OSOB CLI tool above
- - Set the new `UpdateRequiredDate` to 90 days after the previous date, or the `EstimatedRemovalDate`, (whichever comes first).
- - Create a pull request with these changes and follow until merged.
- - If there are no updated versions of the image, simply set the update-required date to 90 days past the previous one
- - After merging any pull requests where multiple images are updated, as usual monitor "main" branch builds until they are successful.
-
-- If the main build has failed (red):
- - Ping [DncEng First Responders Teams Channel](https://teams.microsoft.com/l/channel/19%3aafba3d1545dd45d7b79f34c1821f6055%40thread.skype/First%2520Responders?groupId=4d73664c-9f2f-450d-82a5-c2f02756606d&tenantId=72f988bf-86f1-41af-91ab-2d7cd011db47) and ask for next steps.
- - Optionally, create issue in dotnet/arcade with the "First Responders" label requesting investigation.
-
-##### Weekly (Currently, most rollouts are on Wednesdays):
-- Find the rollout pipeline. Rollout occurs around 9 AM in the [dotnet-helix-machines-ci pipeline](https://dnceng.visualstudio.com/internal/_build?definitionId=596&_a=summary) with branch 'production'. Contact the dnceng team if this is not occurring or you lack permissions.
-- Even if there are no tasks to be done, always create an issue using [this template](https://dnceng.visualstudio.com/internal/_workitems/create/Task?templateId=7599e1ed-4c83-45cd-ad97-10ce36dbbb20&ownerId=3f024d6c-9884-4f38-a598-025fca9dfcd2) and put it into the "active" state.
-- Fill out the sections of the template:
- - Link to completed pipeline
- - Status of the rollout (completed normally, aborted, rolled back, etc)
- - List of any images/queues producing warnings in the pipeline (errors will stop us from running it)
- - Create new issues in [dotnet/arcade](https://github.com/dotnet/arcade/) identifying the queue and linking to the build. Include links to these issues under the "queues that will expire in the next 7 days" or "after the next 7 days" headings as appropriate.
-- When all the above is complete for a given week, you may close the issue.
-
-##### Monthly (closest business day to the 15th):
-**Request DDFUN updates for on-premises Helix machines**
-
-We depend on our partner team DevDiv Fundamentals ("DDFUN") for machine maintenance tasks. DDFUN vendors use table "OsobInventory" in storage account 'helixscripts2' to view the machines. We want any machine that shows up in HelixEnvironment "Production" with Partition Key not equal to "Not in Helix" to be at least reviewed for update monthly. See @MattGal or @Ilyas1974 if you believe you need to gain read access to this table. If new queues are found in the table that are not below, please make a pull request to update the list.
-
-We need to ensure and drive that the machines we are running are updated to the latest possible versions. Since we cannot rely on AzSecPack and automated reporting for these updates, we'll generate IcM tickets requesting this. For each of the three categories of operating system below (Linux, MacOS, and Windows) please create an IcM ticket [here](https://ddfunlandingpagev120210311150648.azurewebsites.net/) using the "[Other](https://portal.microsofticm.com/imp/v3/incidents/create?tmpl=E3619N)" template.
-
-After creating such an IcM for each of the three groups of machines, use [this template](https://dnceng.visualstudio.com/internal/_workitems/create/Task?templateId=5ad0c1bc-5e95-45a1-b40b-de81c12b5b4a&ownerId=3f024d6c-9884-4f38-a598-025fca9dfcd2) to link the three IcM tickets, and keep the issue open until all three are resolved.
-
-
-Suggested IcM Description:
-```
-Machines in the following queues need to be updated to their latest patch versions.
-
-
-
-- No major version updates should occur in operating systems (e.g. do not allow a Windows 10 system to update to Windows 11, or OSX 10.15 to update to 11.0/12.0)
-- For windows, Windows update should be executed until it stops prompting for changes.
-- For MacOS, use the provided UI to take system updates while remaining in the same release band.
-- Where possible (use judgment), if linux package managers have recommended updates these should be taken.
-```
-
-Linux machines (raspbian 9 iot devices should NOT get updated)
-
-- alpine.amd64.tiger.perf
-- ubuntu.1804.amd64.owl.perf
-- ubuntu.1804.amd64.tiger.perf
-- ubuntu.1804.amd64.tiger.perf.open
-- ubuntu.1804.arm64.perf
-- ubuntu.1804.armarch
-- ubuntu.1804.armarch.open
-
-MacOS Machines
-
-- osx.1015.amd64
-- osx.1015.amd64.appletv.open
-- osx.1015.amd64.iphone.open
-- osx.1015.amd64.iphone.perf
-- osx.1015.amd64.open
-- osx.1100.amd64
-- osx.1100.amd64.appletv.open
-- osx.1100.amd64.open
-- osx.1100.amd64.scouting.open
-- osx.1100.arm64
-- osx.1100.arm64.appletv.open
-- osx.1100.arm64.open
-- osx.1200.amd64.iphone.open
-- osx.1200.amd64.open
-- osx.1200.arm64
-- osx.1200.arm64.open
-
-Windows Machines
-
-- windows.10.amd64.19h1.tiger.perf
-- windows.10.amd64.19h1.tiger.perf.open
-- windows.10.amd64.20h2.owl.perf
-- windows.10.amd64.android.open
-- windows.10.amd64.galaxy.perf
-- windows.10.amd64.pixel.perf
-- windows.10.arm32
-- windows.10.arm32.iot
-- windows.10.arm32.iot.open
-- windows.10.arm32.open
-- windows.10.arm64
-- windows.10.arm64.appcompat
-- windows.10.arm64.open
-- windows.10.arm64.perf
-- windows.10.arm64.perf.surf
-- windows.10.arm64.tof
-- windows.10.arm64v8.open
-- windows.11.amd64.cet
-- windows.11.amd64.cet.open
-
-##### Possible issues you may encounter:
-
-When the dotnet-helix-machines-ci build to be rolled out fails with `{QueueOrImageName} has estimated removal date: {c.EstimatedRemovalDate}, which has elapsed. Either extend this date in the yaml definition or remove it from usage to proceed.`:
-
-1. Review the date and confirm using internet search / .NET Release PM Team calendar. **Note the "Matrix of Truth" data, once it exists, supersedes any data in "Estimated Removal dates"
-2. If the date is incorrect, consult with DncEng Operations v-team (or, use judgment) to determine a new date and extend it via pull-request to this repository
-3. If the date is valid, remove the definition via pull request to this repository. If we have erroneously never communicated this state, you may use discretion to set a date sometime in the future to keep the current status quo, but please include a comment over the definition explaining why.
-
-
-When the build to be rolled out contains warnings with `{QueueOrImageName} has estimated removal date: {Date}, in the next three weeks. Please include this in communications about upcoming rollouts.`:
-
-1. Review the date and confirm using internet search / .NET Release PM Team calendar and release notes. Example: [.NET Core 6.0 release notes](https://github.com/dotnet/core/blob/main/release-notes/6.0/supported-os.md)
-2. If the date is incorrect, consult with DncEng operations v-team to determine a new date.
-3. If the date is valid, work with the .NET Engineering Services Rollout team ([Teams Channel](https://teams.microsoft.com/l/channel/19%3a72e283b51f9e4567ba24a35328562df4%40thread.skype/Rollout?groupId=147df318-61de-4f04-8f7b-ecd328c256bb&tenantId=72f988bf-86f1-41af-91ab-2d7cd011db47)) to ensure that this is communicated with every partners "rollout" email until the removal has occurred.
-
-The similar warnings/errors for Update Required look like the below. Their only real difference is that these updates only are shared with the rollout team / customers in the case where a "significant" change occurs (e.g. updating the semi-annual Windows version).
-
-`{QueueOrImageName} has update-required date: {Date}, which has elapsed. Either extend this date in the yaml definition (add comments if relevant), or remove it from usage to proceed.`
-`{QueueOrImageName} has update-required date: {Date}, in the next three weeks. Please either update the image to newer, file an issue requesting this, or extend the date with a comment explaining why if no action is taken.`
-
-#### Known issues impacting the area:
-- We regularly have difficulty generating "novel" new images; when this occurs whoever is driving the process should extend dates (with comments why) as proactively as possible, since we'd like to minimize communication to users that implies actions are needed on their part if we know we don't have something for them to upgrade to.
-- Known tech debt: Completion of "Matrix of Truth" work (needed for unified tracking of expiration dates)
-- Troubleshooting guide per-step, ideally tested by execution by an individual unfamiliar with the feature area(s) involved: None for now, can add as users hit issues.
-
-#### Troubleshooting:
-
-- Ensure all EstimatedRemovalDate/UpdateRequiredDate values are expressed in YYYY-MM-DD format.
-- Use DncEng team for guidance when investigating errors
-
-### Validation Steps
-
-After completing manual steps: (cadence TBD, but probably weekly before rollouts), perform the following checks and make a note in https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/512/Helix-Machine-Management-Operations-Notes
-
-Template (insert at top of wiki mentioned above)
-```
-Date:
-Executor of manual checks: (Github or MS alias)
-Link to Production rollout pipeline:
-
-Pipeline state:
-- Monitor the next dotnet-helix-machines pipeline execution. No warnings or errors related to EstimatedRemovalDate time elapsing should be seen. Provide a link to this pipeline execution. Validate that all queues expected to be deleted did actually get deleted (this can be seen in the "Run DeployQueues" step of the Deploy Queues job).
-- Monitor First Responders Teams channel for surprised users; in the case of erroneous deprecation, work with DncEng team for a hot fix. In the case of expected removal, use discretion and help unblock the user.
-
-Notes:
-- Anything interesting or unusual that happened as part of this week's check-in.
-- Issue(s) falling out of the process for this week:
-```
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5COperations%5CHelix-Machine-Management%5CHelix-Machine-Lifecycle-Processes.md)](https://helix.dot.net/f/p/5?p=Documentation%5COperations%5CHelix-Machine-Management%5CHelix-Machine-Lifecycle-Processes.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5COperations%5CHelix-Machine-Management%5CHelix-Machine-Lifecycle-Processes.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Operations/Templates/process-info-template.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Operations/Templates/process-info-template.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Operations/Templates/process-info-template.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Operations/Templates/process-info-template.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,84 +0,0 @@
-# .NET Core Engineering Services Operations info: - {Process Name}
-
-## Process Details
-
-### Summary:
-Why does this "process" exist in the first place? Who benefits? What happens if it doesn't happen?
-
-### Process Boundaries
-
-What is, and isn't part of this process? Provide data that provides boundaries both in terms of code base and time. As this is a template, feel free to remove / add as applicable. If you find yourself adding content that should be global, please make a pull request to this template doc.
-
-- Related repositories: Links to relevant repos where work will be done
-- Task scope: List guidance/examples of in-scope and out-of-scope for the task
-- Contacts for non-owned parts of the process: For external ownership, who can we talk to?
-
-### Process Inputs / Outputs
-
-Descriptions of what/where the inputs to the process come from (the answer to "what do I or the automated process need to consider to perform this task?", and what performing the below steps correctly achieves ("what comes out the other side?")
-
-Examples:
-
-Inputs:
-- Base Docker images from DockerHub.io / mcr.microsoft.com
-- Gallery Images available from the Azure Portal
-- Package versions from public / internal NuPkg feeds
-- State of the objects in an Azure Subscription
-
-Outputs:
-- Updated dependencies / images
-- Changed state of
-- Assorted reports or telemetry used in reporting
-
-### Execution Steps
-
-These steps will vary based off the type of operation involved. As your process may use any combination of automated / manual stages, use what you need from the template and delete
-
-#### Fully-automatable routines:
-- Description of the process
-- Links to:
- - Source code, any other (say, in-repo) documentation for the automation
- - Pipelines associated with build / deployment of relevant components.
- - Telemetry pages / Grafana alerts related to this process
-- Known issues impacting the area
-- Known tech debt that may cause validation "blindness"
-
-#### Manual processes:
-- Step-by-step description of the process in sequential markdown list format.
-- Known issues impacting the area
-- Known tech debt that may cause validation "blindness"
-- Troubleshooting guide per-step, ideally tested by execution by an individual unfamiliar with the feature area(s) involved.
-
-
-#### Troubleshooting:
-
-List of what to do when "known" things go wrong. When a new problem occurs and requires investigation and fixing, it should be added here.
-
-
-### Validation Steps
-
-After completing manual steps, or on some regular cadence (to be determined), list any follow up checks/activities that need to be done, including things like:
-
-- Which build(s) need to be in a green state
-- Sites to check
-- "Smoke testing" steps for functionality known to lack automation/ have historical regressions
-
-#### Checklist for reviewing this document
-
-This part is more for guidance but can be retained in documents deriving from the template; it gives the writer a means to try to warn against any recurring problems seen.
-
-- Have the supplied steps been executed by a non-SME, non-author IC?
-
-- Do any references to resources include how to obtain and which security permissions are required (if any)
-
-- Are links pointing to other documents or locations valid?
- - Will they be readable by the target audience? If restricted, do they tell the reader where to gain access?
- - Will they continue to exist in the future? (some links, like non-retained AzDO builds, are impermanent)
-
-- Is/are there at least one (ideally two) SME IC(s) listed as contact for clarification?
-
-- Does the document specify sufficient detail that an arbitrary reader would be able to reason about and execute the processes described?
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5COperations%5CTemplates%5Cprocess-info-template.md)](https://helix.dot.net/f/p/5?p=Documentation%5COperations%5CTemplates%5Cprocess-info-template.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5COperations%5CTemplates%5Cprocess-info-template.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Policy/ArcadeContributorGuidance.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Policy/ArcadeContributorGuidance.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Policy/ArcadeContributorGuidance.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Policy/ArcadeContributorGuidance.md 2023-11-13 13:20:34.000000000 +0000
@@ -1,5 +1,5 @@
# Contributor Guidance for Arcade
-Over and above the [.NET Core contributor guidelines](https://github.com/dotnet/coreclr/blob/master/Documentation/project-docs/contributing.md) (which are important), there are some principles and guidelines that are specific to Arcade as well.
+Over and above the [.NET Core contributor guidelines](https://github.com/dotnet/runtime/blob/main/CONTRIBUTING.md) (which are important), there are some principles and guidelines that are specific to Arcade as well.
For the most part, contributions to Arcade are straightforward and relatively smooth. However, from time to time, getting changes in can be challenging, and even frustrating. The very nature of Arcade is that it's shared across multiple teams. This document attempts to clarify some of the expectations as well as provide some 'advice' for success when contributing to Arcade.
@@ -20,8 +20,6 @@
### Ownership
The current owner for dotnet/arcade is Mark Wilkie . The current point persons are:
-- Alex Perovich @alexperovich /
-- Jon Fortescue @jonfortescue /
- Michael Stuckey @garath /
- Ricardo Arenas @riarenas /
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Policy/TeamProcessPolicy.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Policy/TeamProcessPolicy.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Policy/TeamProcessPolicy.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Policy/TeamProcessPolicy.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-# Team Process Policy
-
-- For all work done, a GitHub issue must exist, this includes, but not limited to:
- - Every pull request should reference back to a GitHub issue (if the PR is in AzDO, include a link to the GitHub issue in the PR details).
- - Every rollout must have a GitHub issue to document the change to production.
- - Every hotfix and deployment rollback must have a GitHub issue to document the change to production.
-- All dependency additions/changes, such as version upgrades must be approved through team management (e.g. Application Insights)
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CPolicy%5CTeamProcessPolicy.md)](https://helix.dot.net/f/p/5?p=Documentation%5CPolicy%5CTeamProcessPolicy.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CPolicy%5CTeamProcessPolicy.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Policy/TechnicalDebtPolicy.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Policy/TechnicalDebtPolicy.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Policy/TechnicalDebtPolicy.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Policy/TechnicalDebtPolicy.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,51 +0,0 @@
-# Driving technical debt principles:
-1. We must not add new technical debt. (unless there’s an explicit business decision to do so)
-1. We must reduce our current debt – reasonably over time.
-
-It should be said that it’s easy to miss some of this stuff, so we should feel free to hold each other accountable in code reviews. In fact, getting more active participation in code reviews across the board would be fantastic.
-
-## Stop increasing technical debt:
-We need to do more than just say “don’t add debt”. We don’t always know what this means, we have business pressures which make this difficult at times, and we don’t necessarily know where to start. Actually doing this requires a combination of concrete changes in policy, and continued cultural changes. (yes, they’re intertwined…) But the good news is that we’re ready as a team to tackle this together.
-
-### Policy: (automation of checks wherever we can will be important for sustainability)
-- The following need confirmation from another senior dev. (see ‘ getting confirmation’ below for details)
- - New dependencies (for example, this leads to inconsistencies within the project – making for a mess)
- - New end points or services
- - Duplication of code
- - New language or language version
-  - Use of a prerelease service/library
-- Appropriate test coverage
- - Make it more obvious the extent of test coverage. For example, show code coverage percentage as part of the PR results.
- - An initial bar of at least 50% code coverage –80% better. Exceptions can be given as appropriate. (either up or down) The point is to get the right coverage, not just get a number…. (For example, see here for our current code coverage)
-- Documentation should exist (to say the obvious, we still need better clarity on how we document in general – but that’s outside the scope of this email)
- - It needs to be usable by the intended customer
- - It needs to be discoverable
- - Appropriate existing documentation updated (remember this is debt as well)
-- Features need a design “one pager” and confirmation from another senior dev
- - Bug fixes and/or small items don’t need this
- - For now, let’s put design into the epic itself so we can always find it
-
-### Cultural Changes
-- Brown bag at least once a year to recount war stories. The goal is to keep the “why” top of mind.
-- Retrospectives where/when appropriate.
-- General management support (including active participation) for encouraging/enforcing this new policy on our PRs. It’ll hurt some at times…
-- Encourage conversations about how we keep our quality up, debt low, and generally improve our technical offerings.
-- Discuss in our monthly team meeting from time to time
-
-### Getting Confirmation for a change/proposal
-- Email ‘dnceng’ for broad awareness
-- Get at least one other senior dev to ACK
-- It should be noted that we’ll make mistakes. We can only make decisions based on what we knew at the time. The expectation is less about building zero debt, but rather changing our culture – e.g. the way we think about these things.
-
-## Reduce existing technical debt:
-It would be unreasonable to do nothing other than pay down our technical debt as we have a business to run as well. However, we can focus on a specific area and make a big impact.
-
-### Policy:
-- There should always be a business priority on the board that actively focuses on reducing technical debt. (right this minute we have two, Helix, and Validation)
-- We should limit debt reduction business priorities to one at a time. This is to help stay away from “peanut butter” (spreading effort over a wide area) and make significant progress reducing debt in one area. It’s sorta like choosing a credit card balance with the highest interest to pay down first.
-- There may be times where the business dictates that we don’t have a debt reduction business priority on the board. However, this should be the exception and requires M2 approval as it would go against one of our core principles. For a review, remember to check out our guiding principles.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CPolicy%5CTechnicalDebtPolicy.md)](https://helix.dot.net/f/p/5?p=Documentation%5CPolicy%5CTechnicalDebtPolicy.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CPolicy%5CTechnicalDebtPolicy.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/agentless-helix.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/agentless-helix.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/agentless-helix.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/agentless-helix.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,55 +0,0 @@
-# Allow for waiting for helix to use agentless tasks
-
-## Overview
-We want to reduce wasted compute caused by build jobs waiting for helix tests to complete. To do this we can use the "agentless job" feature in azure pipelines to remove the machine that waits for the helix job to be finished.
-
-## Stakeholders
-- Our Budget
-- Customers who want to retry test jobs without having to rebuild
-
-## Pros
-- Build machine time is no longer wasted on waiting for helix jobs
-- Test job retry doesn't require rebuilding the tests
-
-## Cons
-- Msbuild no longer waits for the helix jobs, so code written in msbuild to process job results needs to either go away or be rewritten server side.
-
-## Current Workflow
-The current build jobs that run helix work do the following:
-1. Acquire build agent from pool
-1. Compile code and create helix payloads and job list
-1. Start azure pipelines test runs
-1. Send Job to Helix via rest api
-1. Poll rest api periodically to check status
-1. Once job reports finished, verify results and finalize test runs
-1. Return build agent to pool
-
-This workflow holds on to a build agent for a long period where it is doing nothing but polling a rest api for status, this period can be removed.
-
-## New Workflow
-Using agentless jobs, we can change this flow to the following:
-1. Acquire build agent from pool
-1. Compile code and create helix payloads and job list
-1. Save job information to output variable
-1. Return build agent to pool
-1. In agentless job:
- 1. Read output variable from previous job/stage and send request to helix api using [Invoke Rest Api](https://learn.microsoft.com/en-us/azure/devops/pipelines/tasks/utility/http-rest-api?view=azure-devops)
- 1. Using the "Callback" completion mode agentless job waits for notifications from helix service
-1. In Helix Service
- 1. Receive request from agentless job and store information
- 1. Start test runs
- 1. Start helix job execution
- 1. Wait for job completion - send progress logs back to azure pipelines
- 1. Report test results for workitem statuses
- 1. Finish test runs and check for job pass/fail status
- 1. Report task completion to azure pipelines
-1. Agentless job finishes after receiving notification from helix service
-
-This workflow gives back the build agent to the pool shortly after finishing the build, and allows the helix jobs to run without requiring a build agent to be listening.
-The agentless job can also be retried to re-run the helix job(s) without requiring a rebuild.
-
-## Proof of Concept
-I have a proof of concept in this PR https://github.com/dotnet/arcade/pull/10342. The agentless job can be seen running here https://dev.azure.com/dnceng-public/public/_build/results?buildId=55561&view=logs&j=830c6850-7aa7-5384-6c90-1a1a71217f4b.
-
-## Implementation details
-This solution requires a web api endpoint on our server that handles the request from the agentless job, then a service running inside our cluster that starts and monitors jobs based on these requests. This service will need to fixup the job payloads with test run information, then monitor execution and report status back to azdo.
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/GrafanaAlert.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/GrafanaAlert.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/MachinesOffline.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/MachinesOffline.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/MultipleSubs.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/MultipleSubs.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/QueueCoreSetup.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/QueueCoreSetup.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/QueueSecurityBuild.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/QueueSecurityBuild.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/RedirectJobsWorkflow.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/RedirectJobsWorkflow.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/Telemetry.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/Telemetry.png differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/assets/toolversions.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/assets/toolversions.png differ
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryDocumentation.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryDocumentation.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryDocumentation.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryDocumentation.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,31 +0,0 @@
-# Documentation for Auto Retry of Failures in Helix
-The following document shows the user documentation of the auto retry feature of test failures in helix according to the [requirements](https://github.com/dotnet/arcade/blob/main/Documentation/Project-Docs/Auto-Retry%20Failures/Overview-Requirements.md).
-
-# What is Auto Retry?
-When a Test fails in Helix, Auto Retry feature provides the ability to automatically retry the test until it passes or crosses the number of retries specified by the user and communicate to the user that the test has passed only after retrying in Mission Control.
-
-# How to configure Auto Retry?
-Adding the optional property (MaxRetryCount) to the MSBuild definition of a repo is all that is needed to turn Auto Retry on.
-For eg.
-For CoreFx, Add ` (Value of allowable retries) ` under the `` in https://github.com/dotnet/corefx/blob/master/src/upload-tests.proj#L11-L79. Once the build picks up the optional property, tests will automatically requeue failed work items to be retried until the MaxRetryCount is reached / the work item passes.
-
-# Mission Control
-In order to see the information in [Mission Control], the user must **Log in**, otherwise no information will be available.
-
-If a work item passed on a retry, the information will be displayed by default along with the count of items that had passed on retry.
-![](./Images/WorkItemAggregateSummary_Count.JPG?raw=true)
-
-Each Work Item that passed on retry will display with a retry icon and tool tip showing `Intermittent Failures`
-![](./Images/WorkItemAggregateSummary_Icon.JPG?raw=true)
-
-Each attempt of retry details can be viewed from the Tests Details Page.
-![](./Images/Logs.JPG?raw=true)
-
-# How to give feedback?
-Please create a new issue in the [core-eng](https://github.com/dotnet/core-eng) repository.
-
-[Mission Control]: https://mc.dot.net/#/
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5CAutoRetryDocumentation.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5CAutoRetryDocumentation.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5CAutoRetryDocumentation.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryTelemetry.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryTelemetry.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryTelemetry.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/AutoRetryTelemetry.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,72 +0,0 @@
-# Overview
-This document provides Kusto Queries to pull telemetry data from Kusto DB for Auto-Retry of Test Failures.
-
-## WorkItems that belong to Jobs that have total pass and has items / tests that passed on retry:
-
-```
-let jobs = Jobs
-| where Source == [source_name]
-| where TestsFail == 0 and ItemsPassedOnRetry > 0 and TestsPassedOnRetry > 0
-| project JobId;
-WorkItems
-| where Status == 'PassOnRetry'
-| where JobId in (jobs)
-| summarize count(WorkItemId) by FriendlyName
-| project FriendlyName, count_WorkItemId
-```
-## Tests Passed on Retry summarized by type and date:
-
-```
-Jobs
-| where Source == [source_name]
-| summarize count(TestsPassedOnRetry) by Type, format_datetime(Finished,"yyyy-MM-dd")
-```
-## Tests Passed on Retry summarized by type and Month:
-
-```
-Jobs
-| where Source == [source_name]
-| summarize count(TestsPassedOnRetry) by Type, format_datetime(Finished,"yyyy-MM")
-```
-## Test Results for a specific WorkItemFriendlyName, Type, Method, Arguments, if the test had failed and passed on retry.
-Test Results table has huge amount of data so getting pass on retry data for entire set of tests/workitems will break the DB’s back, hence the need to filter by a specific test.
-
-```
-TestResults
-| where WorkItemFriendlyName == [WorkItemFriendlyName]
- and Type == [Type]
- and Method == [Method]
- and Arguments == [Arguments]
- and Result != 'Pass'
-| summarize arg_max(toint(Attempt), *) by WorkItemId, Type, Method, Arguments, ArgumentHash
-| join kind = leftouter(
- TestResults
- | where WorkItemFriendlyName == [WorkItemFriendlyName]
- and Type == [Type]
- and Method == [Method]
- and Arguments == [Arguments]
- and Result == 'Pass'
- ) on WorkItemId, Type, Method, Arguments, ArgumentHash
-| extend PassOnRetry = isnotnull(WorkItemId1)
-| summarize count() by Type, Method, Arguments, Result = iif(PassOnRetry, 'PassOnRetry', Result)
-```
-### Sample Value for Parameters:
-
-[source_name] : 'official/corefx/master/'
-
-[WorkItemFriendlyName] : 'System.Net.Http.WinHttpHandler.Functional.Tests'
-
-[Type] : 'System.Net.Http.WinHttpHandlerFunctional.Tests.WinHttpHandlerTest'
-
-[Method] : 'SendAsync_SlowServerAndCancel_ThrowsTaskCanceledException'
-
-[Arguments] : ''
-
-
-
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5CAutoRetryTelemetry.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5CAutoRetryTelemetry.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5CAutoRetryTelemetry.md)
-
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Images/Logs.JPG and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Images/Logs.JPG differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Images/WorkItemAggregateSummary_Count.JPG and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Images/WorkItemAggregateSummary_Count.JPG differ
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Images/WorkItemAggregateSummary_Icon.JPG and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Images/WorkItemAggregateSummary_Icon.JPG differ
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Overview-Requirements.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Overview-Requirements.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Overview-Requirements.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Auto-Retry Failures/Overview-Requirements.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,68 +0,0 @@
-# Overview
-The requirement is to have an ability to retry Helix Test Failures automatically instead of a developer hunting the offensive test and having to manually queue a rerun or spam the PR's with "[at] dotnet-bot test this please".
-In order to promote always green before merging, a mechanism to manage inherently flaky tests in a highly visible fashion is needed. Some tests (like networking) perhaps are flaky by design, but others require additional dev attention. Regardless, the infrastructure should provide the ability to not fail the entire run - but rather provide visibility to the fact that certain tests required multiple tries to pass.
-
-The following are the Steps to achieve this at a high-level:
-1. Ability to configure at a job level:
- - If a work item can be retried on failure (defaults to false)
- - Max retries allowed for a workitem (defaults to 0)
-1. Helix Client detects a failed workitem and requeues the work item at the end of the queue. A work item is not a test.
-1. Helix API will retry the work item until it passes or max retry count is hit.
-1. Test results for each rerun is stored in DB for further retrieval.
-1. On success or when the max retries allowed count is reached the work item is marked as WorkItemFinished.
-1. MC displays the retried workitem with a specific color/format (TBD). The tests that passed on retry are displayed with a specific icon (TBD)
-1. MC displays logs for each retry.
-1. MC displays number of retries
-
-# Out of scope
-1. There will be no attempt to retry a work item on timeouts.
-
-Detailed requirements list below.
-
-# Helix Client
-
-1. Support a separate parameter / reuse delivery count in QueueInfo to identify if a particular work item is a retry work item.
-1. Requeue a work item if one or many tests fail in the work item.
-1. Identify if Min(Max Delivery Count for Requested Queue - 1, Requested Value for retries) has crossed.
-1. Retry work item requeued with a special event other than work item started.
-1. WorkItemFinished should not be sent when requeuing a workitem until the workitem succeeds or max retries for the workitem has been reached.
-
-# Helix Controller
-
-In order to support automated configurable retry, we'll need a few changes in the controller:
-
-1. In Job Started V2+ messages, have a new optional property, call it MaxRetryCount or MaxDeliveryCount. (the difference between the two is 1)
-1. When the property is not supplied, set it to 0 (for MaxRetry option) or 1 (for MaxDelivery)
-1. When the property is supplied, set it to Min(Max Delivery Count for Requested Queue - 1, Requested Value)
-1. Attach the property of your choice to the work items being sent to the clients.
-
-### Nice to Have
-1. Optional: Send App Insights Trace every time you have to pick a value < than requested (helps investigations for folks curious about delivery counts.)
-
-# Helix API
-
- 1. Helix API needs to be aware of partial failures.
- 1. Helix API needs to be able to process multiple test results for a work item ie change the 1 to 1 mapping from name to result blob to potentially a list.
- 1. Helix API notifies WorkItemFinished to CI only after the retries are finished. No change to CI is needed.
-
-# DB
-
-1. Currently we check to pick the latest set of results for a workitem, this check needs to be removed.
-
-# Mission Control UI
-
-1. Add a new icon to show that the test passed on a retry / test failed despite reruns. TBD what icon needs to be used.
-1. Show multiple entries of logs for retried workitems.
-1. "Show Failures Only" checkbox should display the retried tests as well.
-1. Show number of retries, we are already displaying number of failures and skips
-
-### Nice to Have
-1. Separate Checkbox - Show retried workitems (TBD the name) should display only the retried work items
-
-
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5COverview-Requirements.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5COverview-Requirements.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CAuto-Retry%20Failures%5COverview-Requirements.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/AutoScaler/AutoScalerInvestigateIssues.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/AutoScaler/AutoScalerInvestigateIssues.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/AutoScaler/AutoScalerInvestigateIssues.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/AutoScaler/AutoScalerInvestigateIssues.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,160 +0,0 @@
-# Autoscaler - Debugging
-
-The autoscaler is running on all Helix subscriptions and HelixStaging .
-All the logs, traces, and exceptions of the autoscaler live on Application Insights.
-
-If you want to find any error or log about the running service, for production navigate to [dotnet-eng](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/68672ab8-de0c-40f1-8d1b-ffb20bd62c0f/resourceGroups/dotnet-eng-cluster/providers/microsoft.insights/components/dotnet-eng/logs) and for staging navigate to [dotnet-eng-int](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/cab65fc3-d077-467d-931f-3932eabf36d3/resourceGroups/dotnet-eng-int-cluster/providers/Microsoft.Insights/components/dotnet-eng-int/logs). You can use this [dotnet-eng/dotnet-eng-int guide](#logs-in-dotnet-engdotnet-eng-int) to navigate the information.
-
-If you want to see metrics or more data that is sent by the autoscaler for production navigate to [helix-autoscale-prod](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/68672ab8-de0c-40f1-8d1b-ffb20bd62c0f/resourceGroups/helix-autoscale-prod/providers/microsoft.insights/components/helix-autoscale-prod/logs) and for staging navigate to [helix-autoscale-int](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/cab65fc3-d077-467d-931f-3932eabf36d3/resourceGroups/auto-scale-int/providers/microsoft.insights/components/helix-autoscale-int/logs) for staging. You can use this [helix-autoscale-prod/helix-autoscale-int guide](#data-in-helix-autoscale-inthelix-autoscale-prod) to know which information you can find in there.
-
-## **Alerts**
-The main alerts related to the autoscaler are:
-
-* [Helix AutoScaler Service Stopped Running](#Helix-AutoScaler-Service-Stopped-Running)
-* [Cores consumption](#Cores-consumption)
-* [Autoscale: Minutes to scale-up from zero machine](#autoscale-minutes-to-scale-up-from-zero-machine)
-
-
-### **Helix AutoScaler Service Stopped Running**
-[Helix AutoScaler Service Stopped Running Grafana](https://dotnet-eng-grafana.westus2.cloudapp.azure.com/d/arcadeAvailability/service-availability?orgId=1&refresh=30s&from=1606937931219&to=1606959531219&panelId=60&fullscreen)
-Example: https://github.com/dotnet/core-eng/issues/11478
-
-Step by step:
-
-1. [Restart autoscaler node](#How-to-restart-the-autoscaler) as soon as you can - A lot of issues come after the autoscaler stops running such as the alert [Autoscale: Minutes to scale-up from zero machine](#Autoscale:-Minutes-to-scale-up-from-zero-machine).
-2. Find when the autoscaler stopped running (hour)
-3. See if there is any exception that was thrown on that period of time (on [dotnet-eng/dotnet-eng-int](#logs-in-dotnet-engdotnet-eng-int)). This will help you to know what happened.
-4. If there is not an exception, you can see the traces and analyze the last trace that was recorded to have an idea in which step the autoscaler stopped running.
-5. At this point the auto scaler should be running and you should be able to find the issue and fix it.
-
-
-### **Cores consumption**
-[Cores consumption Grafana](https://dotnet-eng-grafana.westus2.cloudapp.azure.com/d/quota/azure-quota-limits?orgId=1&refresh=30s&from=1606937685479&to=1606959285479&var-Resource=cores&var-Resource=standardDv3Family&panelId=30&fullscreen)
-Example: https://github.com/dotnet/core-eng/issues/11542
-
-The autoscaler is in charge of enforcing the core consumption while trying to make all the queues meet the SLA; when this alert gets triggered, we should answer the following questions:
-
-1. Are we over-scaling? This means we have more machines than we need. Grafana [queue monitor](https://dotnet-eng-grafana.westus2.cloudapp.azure.com/d/queues/queue-monitor?orgId=1) could be an awesome way to review this.
-2. Are we having problems scaling down? We should check if there are a lot of machines offline and they are not being deleted. (You can look at the [heartbeats table](#Heartbeats-table))
-3. Are a lot of machines not working and not getting clean? (The autoscaler always try to have machines heart beating so if there are a lot of machines dying the autoscaler is going to try to replace them which can lead to something similar of over scaling, for this you should check the scale sets)
-
-### **Autoscale: Minutes to scale-up from zero machine**
-[Autoscale: Minutes to scale-up from zero machine Grafana](https://dotnet-eng-grafana.westus2.cloudapp.azure.com/d/queues/queue-monitor?orgId=1&from=1606937898053&to=1606959498053&var-QueueName=buildpool.windows.10.amd64.open&var-QueueName=buildpool.windows.10.amd64.vs2017&var-QueueName=buildpool.windows.10.amd64.vs2017.open&var-QueueName=windows.10.amd64.open&var-UntrackedQueues=%22osx%22,%20%22perf%22,%20%22arm%22,%20%22arcade%22,%20%22xaml%22,%20%22appcompat%22&panelId=99&fullscreen)
-
-This alert can get triggered by a bunch of reasons but in some cases, it can be because of the autoscaler.
-
-There are two scenarios in which the alert normally gets triggered:
-* We are having problems scaling up.
-* The scale set has machines, but the machines are not heartbeating.
-
-#### *Scenario: Scaling up problems*
-The scale up problems can come for the autoscaler or the scale set.
-
-1. Check if the alert 'Helix AutoScaler Service Stopped Running' is not active, if this alert is active most likely this is the reason, and you should focus on getting the autoscaler running.
-2. Review the scale set and see if there are machines being created, if this is the case you know that soon there are going to be machines. Even in this case I suggest you take a quick look to see if the problem is not related to [machines not heartbeating](#scenario-the-scale-set-has-machines-but-the-machines-are-not-heartbeating).
-3. If there are no machines getting created
-    * Check the `scaling up` traces for the queue in which the alert got triggered on [dotnet-eng/dotnet-eng-int](#logs-in-dotnet-engdotnet-eng-int). If there are no logs you should check the [machines not heartbeating scenario](#scenario-the-scale-set-has-machines-but-the-machines-are-not-heartbeating).
- * Pick the more recent scaling up trace and go to the scale set and see if the scale up instruction was received and which is the status (Started, Accepted or Succeeded) to know if it is a delay on Azure.
-
-#### *Scenario: The scale set has machines, but the machines are not heartbeating*
-The autoscaler is designed to replace the machines that are not heartbeating but this can be triggered if:
-* We are out of cores.
-* All the machines are offline, and we are not deleting the machines.
-* The machines are not starting.
-* We are taking too long to replace the machines.
-
-1. Review the scale set and the heartbeats table:
-    * If all the machines are offline, we are having problems scaling down. Start by checking if the autoscaler put those machines offline by reviewing the "OfflineReason"; the autoscaler uses "Scaling Down" as OfflineReason.
-    * If a machine is on the scale set but is not appearing on the heartbeat table, review if the machine ever appears (This information is available on the HeartbeatExport table on Kusto); if it appears, check the last status. That information can give you an idea of whether it never started up or whether it died while doing a job.
-2. The system is designed to always reserve cores for the queues so all the queues should be able to have at least one machine so if this happens major changes need to be made to the rebalanced cores logic.
-
-## **Logs in dotnet-eng/dotnet-eng-int**
-
-There are two tables that have information about the autoscaler `traces` and `exceptions`.
-
-The autoscaler is not the only service that logs data on that App Insights, so you need to filter the information, a straightforward way to filter this is using the cloud_RoleName, the identifier for the autoscaler is 'fabric:/CustomAutoScale', and inside that group there are subgroups:
-
-* fabric:/CustomAutoScale/ProcessAutoScaleService - This is the one in charge of managing the actors.
-* fabric:/CustomAutoScale/AutoScaleActorService - This is the actor itself.
-* fabric:/CustomAutoScale/ProcessTelemetryService - This reports information about the telemetry that we are sending, in most cases you can exclude this one, unless you are investigating something telemetry specific.
-
-When looking for an error starting with the exceptions can give information faster, in most cases, you are going to need information about ProcessAutoScaleService and AutoScaleActorService having a query like the following one:
-
- exceptions
- | where cloud_RoleName == "fabric:/Helix/ProcessAutoScaleService"
- or cloud_RoleName == "fabric:/Helix/AutoScaleActorService"
- | sort by timestamp desc
-
-Remember to always use `Time range` to limit the information as much as you can.
-
-If you need more information you can always use the traces to get more data.
-
-If the problem is specific to a queue you should start by filtering the message with the queue, this could be done on the message or as part of a customDimensions:
-
- | where message contains "windows.10.amd64.open.rt"
-
- or
-
- | where customDimensions.queue == "windows.10.amd64.open.rt"
- or customDimensions.queueName == "windows.10.amd64.open.rt"
-
-
-If it is related to a specific problem, there are a couple of keywords that we normally use to filter the data, and this can be filtered as part of the message:
-
-* Scaling (or scaling up / scaling down)
-* Deleting
-* Machines online
-* MaxCapacity (this is related to the cores assigned for queue)
-
-For example:
-
- traces
- | where cloud_RoleName == "fabric:/CustomAutoScale/ProcessAutoScaleService"
- or cloud_RoleName == "fabric:/CustomAutoScale/AutoScaleActorService"
- | where customDimensions.queue == "windows.10.amd64.open.rt"
- | where message contains "scaling down"
- | sort by timestamp desc
-
-
-## **Data in helix-autoscale-int/helix-autoscale-prod**
-If you want more information about how the autoscaler is behaving, you can look at helix-autoscale-int/helix-autoscale-prod Application Insight in which information is recorded on `custom events ` table with the following names:
-
-* AdjustCapacity: Every 30 seconds reports all the variables that are important for the autoscaler to decide how many machines need, this are active messages, current capacity, desired capacity, machine creation time, max capacity, work items per machine and SLA.
-* AutoScaleReport: Every 30 seconds reports the current capacity (machines heartbeating) and queue depth.
-* QueueReport: Every minute reports queue depth and state of the heartbeating machines (initializing, offline, online, busy). This data can be analyzed on Grafana on [Queue Monitor Production](https://dotnet-eng-grafana.westus2.cloudapp.azure.com/d/queues/queue-monitor) or [Queue Monitor Staging](https://dotnet-eng-grafana-staging.westus2.cloudapp.azure.com/d/queues/queue-monitor).
-* QueueCapacityChanged: When the capacity of a queue changes send data about current capacity, previous capacity, and future capacity.
-
-## **Additional Info**
-
-### How to restart the autoscaler
-1. For production navigate to [Service Fabric Explorer dotnet-eng](https://dotnet-eng.westus2.cloudapp.azure.com:19080/Explorer/index.html#/) and for staging navigate to [Service Fabric Explorer dotnet-eng-int](https://dotnet-eng-int.westus2.cloudapp.azure.com:19080/Explorer/index.html#/). (If you have problems accessing this site, remember that you need a [certificate](#get-the-certificate-to-access-autoscaler-cluster))
-2. Find in which node is running ProcessAutoScaleService:
- * Open the following tabs: CustomAutoScaleType -> fabric:/CustomAutoScale -> Service fabric:/CustomAutoScale/ProcessAutoScaleService.
- * Open one more tab and you are going to be able to see the name of the node, is going to look like this: _Primary_3.
-2. Open the Nodes tab and find the node name that you got on the previous step.
-3. Go to Actions button and click on Restart.
-
-### Get the certificate to access autoscaler cluster
-1. Navigate to [HelixProdKV | Certificates](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/68672ab8-de0c-40f1-8d1b-ffb20bd62c0f/resourceGroups/helixinfrarg/providers/Microsoft.KeyVault/vaults/HelixProdKV/certificates) / [HelixStagingKV | Certificates](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/cab65fc3-d077-467d-931f-3932eabf36d3/resourceGroups/helixstagingkvrg/providers/Microsoft.KeyVault/vaults/HelixStagingKV/certificates)
-3. Select `dotnet-eng-client-westus2-cloudapp-azure-com` certificate.
-4. Open the current version.
-5. Download in PFX/PEM format:
- - Leave password empty
- - Certificate Store select Place all certificates in the following store > Browse > Personal
-
-### Heartbeats table
-To access the heartbeats table using Azure Portal, follow these steps:
-1. Navigate to [helixscripts2](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/68672ab8-de0c-40f1-8d1b-ffb20bd62c0f/resourceGroups/helixinfrarg/providers/Microsoft.Storage/storageAccounts/helixscripts2/storageexplorer)/[helixstagescripts2](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/cab65fc3-d077-467d-931f-3932eabf36d3/resourceGroups/helixstaginginfrarg/providers/Microsoft.Storage/storageAccounts/helixstagescripts2/storageexplorer)
-2. Open the Tables tab.
-3. Click on heartbeats.
-
-To access the heartbeats table using Microsoft Azure Storage Explorer, follow these steps:
-1. Open Microsoft Azure Storage Explorer.
-2. Navigate to Helix/HelixStaging subscription.
-3. Find the storage account helixscripts2 or helixstagescripts2.
-4. Open the Tables tab.
-5. Click on heartbeats.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CAutoScaler%5CAutoScalerInvestigateIssues.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CAutoScaler%5CAutoScalerInvestigateIssues.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CAutoScaler%5CAutoScalerInvestigateIssues.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/build-failure-buckets.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/build-failure-buckets.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/build-failure-buckets.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/build-failure-buckets.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,16 +0,0 @@
-
-
-### Misc references:
-- Here’s the project you need to add a reference to in order to get a stream to an azure blob: https://mseng.visualstudio.com/Tools/Engineering%20Infrastructure/_git/CoreFX%20Engineering%20Infrastructure?path=%2Fsrc%2Fcommon&version=GBmaster&_a=contents
-- Here is the class that has the helpers around blobs (https://mseng.visualstudio.com/Tools/Engineering%20Infrastructure/_git/CoreFX%20Engineering%20Infrastructure?path=%2Fsrc%2Fcommon%2Fcommon%2Fazure%2FAzureBlobStorage.cs&version=GBmaster&_a=contents); right now there’s a constructor that requires a storage account even though the function you need (GetExternalBlobReadStreamFromSasTokenUrlAsync) doesn’t utilize that account. We can add a parameter-less constructor to that class if you need
-
-- regex for buckets from Jenkins: https://ci.dot.net/failure-cause-management/
-- Plug in itself: https://github.com/jenkinsci/build-failure-analyzer-plugin
-- XML hurl w/ regex: https://ci.dot.net/userContent/build-failure-analyzer.xml
-
-- Aho-Corasick Alg for searching large files: https://github.com/pdonald/aho-corasick (https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm)
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cbuild-failure-buckets.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cbuild-failure-buckets.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cbuild-failure-buckets.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/buildingvertical.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/buildingvertical.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/buildingvertical.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/buildingvertical.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,124 +0,0 @@
-# Building a Vertical Implementation Details #
-
-**Definitions**
-
-*VerticalTargetGroup*
-
-`VerticalTargetGroup` - We need a property to define the vertical target group, but we don't want to set "TargetGroup" explicitly or we won't be able to build the "" TargetGroups for projects.
- If `VerticalTargetGroup != ""`, we import buildvertical.targets which will contain our additional targets.
-
-*SupportedGroups*
-
-For each ref project and src project, we define `SupportedGroups`. `SupportedGroups` is a tuple for the supported `TargetGroups` and `OSGroups`.
-
-
-ie
-
-ref\System.Runtime.csproj
-```MSBuild
-
-
- netstandard1.7_Windows_NT;
- netstandard1.7_OSX;
- netstandard1.7_Linux;
- netcoreapp1.1_Windows_NT;
- netcoreapp1.1_OSX;
- netcoreapp1.1_Linux
-
-
-```
-
-*Contract Layer*
-
-We have a contract layer (msbuild task).
-
-Inputs:
-
- SupportedGroups
- VerticalTargetGroup
- OSGroup
-Output:
-
- VerticalTargets (ItemTask)
- metadata: TargetGroup
- OSGroup
-
-Given the supported target and OS groups, and the desired vertical target and OS groups, return the closest supported group or empty metadata items.
-How should we handle determining the target / os groups, fallback groups, etc...? The simplest solution is to use the NuGet api's for targets. We can use platforms\runtime.json for os groups, or try to use the already existent os group filtering instead of adding it to the contract layer.
-
-Options:
-
-1. Use NuGet API's
-
-2. Make use of inormation we already have and develop our own resolution algorithm.
-
-The current plan is to use the NuGet API's. We know that there is an intrinsic problem with the NuGet API's, in that we (CoreFx) define the targets (tfm's), but NuGet contains the data / logic, so anytime we want to create a new tfm, we have to go make an update to NuGet. This is an existent problem. For now, it is much simpler to utilize NuGet instead of deriving a second solution. When we break free of the NuGet dependency and wholly define our tfm graph, then we should utilize that solution for this work.
-
-**Building a vertical implementation steps**
-
-1 - Include all projects, we don't need to build the .builds files for each library, because we only want to build each project at most once for a given vertical.
-
-```MSBuild
-
-
-
-
-```
-
-2 - Iterate all projects through the contract layer, removing (and logging) any projects which return null metadata (not supported).
-
-3 - Build `OutputPath` is set to drop all binaries into a single folder
-
-Current standard `OutputPath`
-
-```MSBuild
-$(BaseOutputPath)$(OSPlatformConfig)/$(MSBuildProjectName)/$(TargetOutputRelPath)$(OutputPathSubfolder)
-```
-Example: E:\gh\chcosta\corefx\bin/AnyOS.AnyCPU.Debug/System.Buffers/netcoreapp1.1/
-
-Proposed vertical `OutputPath`
-
-```MSBuild
-$(BinDir)/$(VerticalTargetGroup)/$(OSPlatformConfig)
-```
-Example: E:\gh\chcosta\corefx\bin/netcoreapp1.7/AnyOS.AnyCPU.Debug
-
-Traditionally, the output path contains the `TargetGroup` as a part of the path. The flat structure means we don't have to play games with the `TargetPath` to figure out when, for example, "System.Buffers" ("netstandard1.1") is trying to find the "System.Runtime" reference ("netstandard1.7"), that there is no path for "System.Runtime.dll" containing the "netstandard1.1" target group.
-
-4 - Build all reference assemblies. The reference assembly projects, which were not trimmed in step 2, are all built. TBD, should we again use the contract layer during the build to determine the targets for the project, or should we capture that as metadata for the project in step 2?
-
-5 - Build all src assemblies into the "OutputPath". The src assembly projects, which were not trimmed in step 2. are all built.
-
-6 - build packages, TBD
-
-**Building a library**
-
-In addition to the ability to build an entire vertical, we require the ability to build a single library. This, single library build should utilize context to determine TargetGroup and OSGroup. ie, If a vertical build completes, and you want to build an individual library, it should use the group values from the vertical build unless you specify otherwise. If you specify otherwise, then those settings become the new settings. If no context is available, then the library should be built with a set of commond default values.
-
-When building an individual library, or project, its P2P references must be queried to determine supported configurations for building that refernce and then the best configuration must be chosen.
-
-**Additional issues**
-
-- building specific folders (filter by partition)?
-
-- building / running tests for a vertical
-
- - building tests against packages
-
-- Official builds?
-
-- CI testing?
-
-- Validation
-
- - Is it an error condition if any library does not contribute to the latest standard vertical?
-
- - Is it an error condition if a library does not contribute to any OS group? probably
-
-
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cbuildingvertical.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cbuildingvertical.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cbuildingvertical.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/cmake-design.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/cmake-design.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/cmake-design.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/cmake-design.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,60 +0,0 @@
-# Design Contract for Searching and Acquiring Build Prerequisites
-
-This document presents the design contract for searching and acquiring build prerequisites. Scope is limited to developer workflow in [CoreFx](https://github.com/dotnet/corefx.git) repository.
-
-A CoreFx developer uses `build.cmd` or `build.sh` to build the repository. Build requires certain tools, for example, CMake. Build should have a specified set of locations to search a tool, and if the tool is not found in any of those locations then, acquire the tool from a specified URL. This document describes the contract between the build and scripts that search and acquire a tool.
-
-####Tool Manifest
-
-Details of each tool required for the build will be in a manifest file `.toolversions` located in the root of the repository. Each tool should have the following details:
-
- 1. Name of the tool
- 2. Declared version
- 3. Search paths where the tool is likely to be found
- 4. Acquire paths from where the tool can be obtained
-
-An example of `.toolversions` is shown below:
-
-----------
-![toolversions.](./assets/toolversions.png?raw=true)
-
-----------
-
-####Probing Mechanism
-
-Build will use a probing mechanism to get the required tool. For any tool, probing involves the following three tasks in sequence:
-
- 1. Search the tool requested by the build. Searches the tool in locations specified in `.toolversions` If tool is found then, return the tool path to the build.
- 2. If search fails to find the tool then, acquire the tool from the location specified in `.toolversions`
- 3. If search and acquire fail then, return an error message to build
-
-####Search
-
-ISearchTool interface provides virtual and abstracts methods that accomplish searching of a tool. Default implementation would search the tool in environment path and a location within the repository specified in`.toolversions`. A tool can override the base, and have its own implementation of search.
-Each tool should implement abstract methods.
-
-####Acquire
-
-IAcquireTool interface provides virtual and abstracts methods that accomplish the acquisition of a tool. Default implementation would download the tool from the URL specified in `.toolversions`, and extract the tool to a location within the repository specified in `.toolversions`. A tool can override the base, and have it own implementation of acquisition.
-Each tool should implement abstract methods.
-
-####Helpers
-
-Helpers are a set of utility functions. For example, a function that can parse `.toolversions` and get the declared version of a tool. Probe, search and acquire scripts will use these functions.
-
-Probe, search and acquire scripts will be in `tools-local` folder in root folder of CoreFx repository.
-A short description of each folder under `tools-local` is provided in the table below.
-
-Folder | Description
------- | -----------
-unix | Shell scripts that provide default implementation of ISearchTool and IAcquireTool, probe-tool, and helpers
-unix/cmake | Shell scripts that override the default implementation, and implement abstract methods for CMake.
-unix/clang | Shell scripts that override the default implementation, and implement abstract methods for CMake.
-
-Similar folder structure will be available for Windows PowerShell scripts. `Tools/downloads/` in CoreFx repository root will be the default location for downloaded tools.
-
-Official builds will override the default locations where the tool is searched and acquired. These override locations for official builds are specified in a copy (not available in open) of `.toolversions` file. If a path to the override is specified as a command line argument to `build.cmd` or `build.sh` then, config values from `.toolversions` file located in that specified path are used in search and acquire scripts.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Ccmake-design.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Ccmake-design.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Ccmake-design.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/cmake-scenarios.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/cmake-scenarios.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/cmake-scenarios.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/cmake-scenarios.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,177 +0,0 @@
-This document presents the scenarios for CMake in .NET Core.
-
-# Summary
-[CMake](https://cmake.org/overview/) is a prerequisite for building .NET Core repositories such as CoreFx and CoreCLR. When a developer attempts to build, a repository using the build script, the script probes for CMake on the machine, and if CMake is not found then, the script terminates and the build fails. There is no guidance for developer on which version of CMake to install. Not stating a version of CMake for a repository can lead to further challenges for example, when a servicing a release.
-
-Thus, there is need to improve CMake usage and acquisition experience. Hence the purpose of this document is to define the scenarios for CMake in .NET Core, and lay a foundation for designing the improved experience.
-
-# CMake Scenarios
-Each subheading below is a name of a scenario. Under each subheading there will be a short description of the scenario and a narrative. The narrative includes an actor, flow of events, and the resulting outcomes. An outcome consists of one or more pairs of the current and desired experiences.
-
- * [Build a .NET Core repository on a clean machine](#build-a-net-core-repository-on-a-clean-machine)
- * [Build a .NET Core repository on an existing development machine](#build-a-net-core-repository-on-an-existing-development-machine)
- * [Setup an official build for a .NET Core repository](#setup-an-official-build-for-a-net-core-repository)
- * [Service a .NET Core release](#service-a-net-core-release)
- * [Revise the CMake version of a .NET Core repository](#revise-the-cmake-version-of-a-net-core-repository)
- * [A new version of CMake can be tested against a .NET Core repository](#a-new-version-of-cmake-can-be-tested-against-a-net-core-repository)
- * [Guidance for setting up official builds for a .NET Core repository is available](#guidance-for-setting-up-official-builds-for-a-net-core-repository-is-available)
-
-## Build a .NET Core repository on a clean machine
-A .NET Core user (Microsoft employee or someone in the open) clones a .NET Core repository and attempts to build.
-
-### Narrative
-I am an IT Manager from Contoso who was inspired by .NET Core demos at [Connect(); 2016](https://msdn.microsoft.com/en-us/magazine/connect16mag.aspx), and I would like to contribute to .NET Core.
-
-Flow of events:
- 1. I setup a clean Windows 10 VM through my Azure subscription.
- 2. I cloned CoreFx repository from [dotnet/corefx](https://github.com/dotnet/corefx.git)
- 3. I attempted to build the repository using the build command (build.cmd)
-
-### Outcome #1 Build fails
-**Current**: Build fails saying CMake, which is a prerequisite, is missing on the VM. Though the error message provides an URL from where CMake can be downloaded, it does not list a specific version or a range of supported versions. I am not certain on what version to download.
-
-**Desired**:
-Build probes for CMake in .NET Core sandbox tools folder in addition to the current probing locations. If CMake is not found, then the build attempts to acquire the declared version of CMake from the ***tools*** cache. If acquisition fails, then the build presents an error message that informs the user the specific version of CMake to download, and the suggested source to download it from. I have two options from here:
-
- - I download the specific version and perform a default install. *(TBD: Should the user restart Command or Terminal window?).*
- - Perform a gesture described in the error message to acquire CMake. I perform the gesture so that a tool downloads and extracts the declared version of CMake to a .NET Core sandbox tools folder.
-
-Either of the above options allow me to run build command successfully.
-
-## Build a .NET Core repository on an existing development machine
-A .NET Core user would like to clone a .NET Core repository to his/her existing development machine, and build that repository.
-
-### Narrative
-I am an IT Manager from Contoso who was inspired by .NET Core demos at [Connect(); 2016](https://msdn.microsoft.com/en-us/magazine/connect16mag.aspx), and I would like to contribute to .NET Core.
-
-Flow of events:
- 1. On my existing development machine, I cloned CoreFx repository from
- [dotnet/corefx](https://github.com/dotnet/corefx.git)
- 2. I attempted to build the repository using the build command (build.cmd)
-
-### Outcome #1: Build succeeds
-**Current**: I have no indication about the CMake version used.
-
-**Desired**: I can refer to a build artifact to find the version of CMake used in the build.
-
-Note: Since an existing development machine is being used, the machine could have the declared version of CMake.
-
-### Outcome #2: Build fails
-**Current**:
-
- - Build fails saying CMake, which is a prerequisite is not available on the machine. I try to download CMake based on the error message. I am not certain on what version to download.
- - Build fails in strange ways due to a version of CMake present on the machine, and thus making it difficult to trace back.
-
-**Desired**:
-
- 1. On a clean machine, the desired experience should be same as earlier scenario [Build a .NET Core repository on a clean machine](#build-a-net-core-repository-on-a-clean-machine)
- 2. On an existing machine that has a version of CMake, which is different than the declared version, there are two outcomes:
- 1. Default outcome is build consumes the available CMake version.
- 2. I can ensure the build consumes the declared version of CMake. This means if the build detects that the version of CMake available is not the declared version, then the build attempts to acquire the declared version. This acquisition experience should be same as in the earlier scenario [Build a .NET Core repository on a clean machine](#build-a-net-core-repository-on-a-clean-machine).
-
-Either of the above options allow me to run build command successfully.
-
-## Setup an official build for a .NET Core repository
-A .NET Core repository owner would like to setup a reliable, repeatable and trustable process of producing official builds.
-
-### Narrative
-I am the owner of [.NET CoreFx](https://github.com/dotnet/corefx) repository. I would like to setup a process that will produce reliable, repeatable and trustable builds for this repository.
-
-Flow of events:
-
- 1. Created a new VSTS build definition that runs the build command of the repository.
- 2. Ensured the builds succeed
-
-### Outcome #1: Official builds consume the declared version of CMake.
-
-**Current**: .NET Core repository is built using a version of CMake installed while setting up the VM. Version of CMake is not logged in build artifact.
-
-**Desired**:
- 1. I can find the declared version of CMake for CoreFx or any .NET Core repository within the repository itself.
- 2. I can setup official build of a .NET Core repository such that the build acquires the declared version of CMake from OSS Tools repository, and places it in a sandbox location.
- 3. I can ensure that the official build utilizes CMake tool from the sandbox folder.
-
-Note: OSS Tool repository will download CMake source code, security audit the source code, build and then host. Thus, minimizing any security risks that might arise when CMake is consumed directly from internet.
-
-### Outcome #2: Official builds setup and maintenance is reliable and costs lowered
-**Current**: Build agents are tied to a specific version of CMake.
-
-**Desired**: Build agents are more contained and not dependent on a particular build agent for CMake installations. Thus, the same agent can build multiple repositories and branches.
-
-## Service a .NET Core release
-A .NET Core team member who would like to service a release, and needs the release configuration to rebuild.
-
-### Narrative
-I am a .NET Core team member who is assigned to service a [CoreFx](https://github.com/dotnet/corefx) release to address an issue reported in the product.
-
-Flow of events:
- 1. I checked out the release branch on my local developer machine. Understood the root cause of the reported issue.
- 2. I created a service branch i.e., fork from release branch, and have a fix ready.
- 3. I followed the developer guidelines available for that branch to perform a build.
-
-### Outcome: Service branch builds consume the declared version of CMake
-**Current**: Since a declared version of CMake is not available within the repository, I will build the service branch using the latest version of CMake available. This latest version of CMake might introduce new product behaviors that I will have to resolve. Thus, lack of declared version introduces uncertainty and additional costs in servicing a branch.
-
-**Desired**:
- 1. I can run the build such that if the required toolset does not match then, the build acquires the required toolset.
- 2. I can find out the CMake version used to build the release branch. Declared version is available within the repository itself.
- 3. I can acquire and run build with tools in a sandbox folder.
-
-## Revise the CMake version of a .NET Core repository
-A .NET Core contributor can update the declared CMake version for a given .NET Core repository.
-
-### Narrative
-I am a .NET Core contributor working on new features in CoreFx. These features require the latest version of CMake.
-
-Flow of events:
- 1. I verified the compatibility of existing features with the new version of CMake.
- 2. I would like to update the product to be built using this new version of CMake.
-
-### Outcome: All build scenarios are aware of the new version of CMake
-**Current**: Though as a .NET Core contributor I can build a local repository using different versions of CMake, doing the same with official builds involves cost such as updating build definition, notifying repository owners, and finally inform the open community of users about the new version of CMake.
-
-**Desired**: As a .NET Core contributor I can modify declared CMake version, and submit this change as a pull request (PR). The change to declared CMake version is reflected in all build scenarios. This means, the official builds will pick up the new version from OSS Tools repository, and other scenarios reflect the change to the declared version of CMake.
-
-Note: A requirement for this scenario is that the new version of CMake should be available on OSS Tool repository.
-
-## A new version of CMake can be tested against a .NET Core repository
-A .NET Core team member can try a new version of CMake to build a .NET Core repository.
-
-### Narrative
-As a team member of .NET Core or Engineering Services, I would like to check the applicability of a new version of CMake in a .NET Core repository. For instance, verify if a new version of CMake is compatible with a .NET Core repository, and no unexpected regressions in the product are introduced.
-
-Flow of events:
- 1. I built the local repository using the new version of CMake. Ensure build succeeds.
- 2. I would like to test official builds produced with this new version of CMake.
-
-### Outcome: A test run of official build consuming the new CMake package can be performed
-**Current**: Updating an official build to test a new version of CMake involves the following steps -
- 1. Creating a new agent pool
- 2. Adding VMs to the pool where each VM has the new version of CMake installed
- 3. Setting up new build definitions that point to this pool
-
-**Desired**:
- 1. I'm able to build a .NET Core repository using a CMake version, which is not in the declared version.
- 2. I can perform a test run of official build and ensure that the CMake package is consumed, and build succeeded.
-
-## Guidance for setting up official builds for a .NET Core repository is available
-A .NET Core repository owner refers to documentation that describes the procedure to setup up official builds.
-
-### Narrative
-I'm team member on Red Hat. I would like to setup official builds of our CoreFx repository using the same CMake version used in .NET Core official builds.
-
-Flow of events:
- 1. I forked CoreFx repository from [dotnet/corefx](https://github.com/dotnet/corefx.git)
- 2. I followed the developer guidelines to setup official builds
-
-### Outcome: .NET Core users refer to documentation about setting up official builds
-**Current**: In my attempt to setup official builds, I will use the latest version of CMake in official builds since CoreFx documentation does not provide any guidance on declared CMake version. Native binaries from Red Hat build is now significantly different from those produced in .NET Core product team's official builds. This difference is due to the different versions of CMake used in the respective builds.
-
-**Desired**:
- 1. Declared version of the .NET Core repository is available within the repository itself. I can find it, and enforce it with my toolset story
- 2. I can specify a version of CMake that should be consumed in the build command of my .NET Core repository.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Ccmake-scenarios.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Ccmake-scenarios.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Ccmake-scenarios.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Core Reduction/Automated_MaxScale_Updates.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Core Reduction/Automated_MaxScale_Updates.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Core Reduction/Automated_MaxScale_Updates.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Core Reduction/Automated_MaxScale_Updates.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,56 +0,0 @@
-## **Proposal for automating max scale updates** ##
-
-### **Steps needed to calculate max scale for queues:** ###
-
-1. Determine the usage for the queue for the past 60 days by querying the Kusto database and getting the workitems that were queued, process and completed during this time frame.
-2. Determine the average waittime and 95th percentile waittime for the queue based on #1
-3. Determine max machines / cores used to process the workitems from #1.
-
-The above steps are done by using the [Scaler Simulator](#Scaler-Simulator) tool.
-
-### **Scaler Simulator:** ###
-
-[Scaler Simulator](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-machines?path=%2Ftools%2FScalerSimulator&version=GBScalerSimulator&_a=contents) is a tool that can currently do the following:
-
-- Query Kusto to get workitems for a specific queue for a specified time frame.
-- Calculate the average and 95th percentile wait time for the queue based on data from Kusto.
-- Determine the max machines that were needed to process the workitems for that specific queue.
-- Given a max set of machines for a queue, the simulator can simulate the processing of workitems (based on the Kusto query above) and calculate the average and 95th percentile waititme.
-- Given a SLA for wait time, the simulator can also reverse-engineer and determine how many machines would be needed for the queue to process the workitems(based on the Kusto query above)
-
-### **Automation Proposal Stages:** ###
-
-**Stage 1:**
-
-* Extending Scaler Simulator to do the following based on the assumption that the baseline of core/machine distribution is already arrived at in the machine config yamls:
-
- * Inputs:
- * Total machines / cores that can be used
- * 95th percentile SLA for wait time for Build Queues
- * 95th percentile SLA for wait time for Test Queues
-
- * Process/simulate each queue in the list of queues that are active (this list currently is hardcoded in a csv but can be extended to pick non-deadlettered queues from info.json directly) to determine what the max machines that are needed to process the load in the queue to be able to hit the SLA given.
-
- * Rebalance the max machines allocated based on the past usage by maintaining a running surplus list during the course of a simulator run.A successful run spits out the updated max scale list for the queues processed.
-
- * i. If the simulation determines that the max machines needed for a specific queue is less than the max scale already set, then update the max scale and add the remaining to the surplus.
- * ii. Maintain a list of queues that need more machines than the current max scale value. If the simulation determines that the max machines needed are more than the max scale already set, then update the list with the queues and how much more is needed.
- * iii. Once the simulation finishes processing all queues and a final surplus machines count is available, allocate / distribute the surplus among the list of queues identified in #ii. TBD – Actual implementation on the mechanics behind the distribution process.
- * iv. While doing #iii, if the simulator exhausts all items in the surplus list and there are still queues that require more machines than the max scale that is already set, then fail the simulator letting the user know “This operation will exceed the max total cores allowed, please readjust the total cores or the wait time SLA”
- * v. During the process, if any queue has no past usage then mark the queue to be deadlettered unless identified as a new queue. TBD - process to identify a new queue.
-
-**Stage 2:**
-
-* The extended simulator opens a PR with the determined changes to the max scale to dotnet-helix-machines master branch which then will be reviewed and merged by a human. The change will then follow the normal rollout process to make it to Production.
-
-**Stage 3:**
-
-* The extended Simulator can be run manually by providing the inputs on as-need-basis. If we determine that this is needed to be run on a regular cadence, then the simulator can be hooked up to a separate pipeline and schedule a run based on the need with the variables of the pipeline as inputs.
-
-**Stage 4:**
-
-* Instead of using a simulator to determine the max scale, rebalance the max scale on the fly based on the usage by the custom auto-scaler and is able to take in the input of what the total machines allowed are to consider while rebalancing. TBD - Feasibility of implementing this into auto-scaler and ability to test this thoroughly.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CCore%20Reduction%5CAutomated_MaxScale_Updates.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CCore%20Reduction%5CAutomated_MaxScale_Updates.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CCore%20Reduction%5CAutomated_MaxScale_Updates.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/custom-auto-scaling.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/custom-auto-scaling.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/custom-auto-scaling.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/custom-auto-scaling.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-# Custom Auto-Scaler v2
-
-The goal of this project is to build a reliable and cost-efficient scaling system to manage Azure Scale sets for Helix VMs.
-
-## Telemetry processor
-
-Telemetry processor collects the following data:
-- From service bus, the number of workitems in the queue
-- From the heartbeats table, the number of machines that have had a heartbeat in the last minute
-It is implemented as a service in service fabric, runs every 30 seconds and sends the collected data to AppInsights.
-
-![](./assets/Telemetry.png)
-
-This replaces ProcessTelemetry Azure function in Custom Auto-Scaler v1
-
-## Alerting
-
-The system uses Grafana to create alerts when Helix queues aren’t scaling, it will create a GitHub issue when there has been work in a queue waiting for more than 30 minutes and no machines have showed up in the heartbeats table. Grafana doesn’t query the heartbeats table directly, instead it reads the data from AppInsights to tell how long the work items have been waiting for a machine.
-
-![](./assets/GrafanaAlert.png)
-
-At first, the Auto Scaler team monitors these alerts but after the stabilization phase FR will investigate these alerts.
-
-## Scale in
-
-Custom Auto-Scale v2 scales to any capacity the service requires.
-For scaling in, the service uses Helix API to take machines offline and then deletes the machine from the corresponding scale set. It will delete the oldest idle machine in the queue trying to keep the machines as fresh as possible, it uses the information in the heartbeats table to get the creation date of each machine.
-
-![](./assets/MachinesOffline.png)
-
-## Telemetry
-
-The service sends to AppInsights the following telemetry
-- Time it took for a queue to scale to the desirable capacity
-- State of the queue and the desired capacity for this state
-
-Core-eng uses this data to make adjustment in the scaling rules
-
-## Multiple subscriptions
-
-Every team, that requires it, has its own queues for their work reducing the noise caused by using shared queries, giving information of how much money is being spent running their jobs and allowing to modify their scaling rules as needed.
-
-![](./assets/MultipleSubs.png)
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Ccustom-auto-scaling.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Ccustom-auto-scaling.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Ccustom-auto-scaling.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dependency Flow/improved-dependency-flow.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dependency Flow/improved-dependency-flow.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dependency Flow/improved-dependency-flow.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dependency Flow/improved-dependency-flow.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,154 +0,0 @@
-# Improved Dependency Flow for .NET 5
-
-## Goals
-
-* Eliminate the need for opening Pull Requests for inter-repository dependency
- updates in channels where breaking changes are the exception, in order to
- reduce the total time it takes to achieve a full product build by skipping PR
- validation builds.
-
-* Keep the existing dependency flow PR functionality in place for channels and
- repos that won't benefit from the new flow.
-
-## Non-Goals
-
-* We are not looking to optimize dependency flow for channels and repos that are
- in active development. Constant commits to the target branches for
- subscriptions will result in the new flow not being able to automatically
- merge changes, and will instead result in delays opening the dependency update
- PRs.
-
-* We won't thoroughly validate builds that result from applying dependency
- updates with the new flow. Since we will only run the official builds for the
- target repositories, any testing that is performed during PRs will be skipped,
- and any failures during CI builds won't have an associated PR that caused the
- break.
-
-## Overview
-
-In order to reduce the number of dependency update PRs that flow across the
-stack, we will attempt to apply dependency updates directly on the branches that
-subscriptions are targetting. In order to accomplish this, Maestro++ will
-dynamically create branches to apply dependency updates to, run an official
-build for the repo, and if both the build succeeds, and a fast-forward merge to
-the target branch is possible, it will directly merge the dependency updates. In
-cases where there's an error in the build that takes the new dependencies, or in
-cases where a fast-forward merge is not possible, we will fall back to opening a
-PR just like it happens today.
-
-This flow will ensure that dependency updates are seamlessly flowing across the
-stack without manual intervention, especially for release channels, where
-breaking changes are the exception, and dependency update commits make up the
-brunt of the branch's history. This means that PR validation builds are
-constantly increasing the times required to flow depenencies.
-
-![release-branch-history-example](release-branch-history.png)
-
-## Repository Requirements
-
-We would like to keep the changes that product repositories need to perform to a
-minimum. However, some configuration will need to be performed for repos that
-wish to participate.
-
-* Since we will depend on running an official build of the repo from a branch
- that is created and deleted once the dependeny flow process finishes, we will
- require that there are no unwanted side effects based on the branch name that
- runs the build. (For example, "Unless running the build from the master
- branch, copy blobs from location A -> B, and fail in other cases)
-
-* Branch policies need to be configured such that the bot account that will
- perform the merging of updates needs to have push permissions to any branch
- where this functionality is to be enabled.
-
-## Subscription Changes
-
-* We will introduce a new option for subscriptions to opt-in into the new direct
- merge behavior. This option will be provided for both existing and new
- subscriptions.
-
-* As Maestro++ will have to be able to monitor the official builds for a
- subscription's target repository, we will use the build information currently
- in the BAR to attempt to pre-populate the Azure DevOps mirror for the
- repository, along with the build definition that hosts its official build. In
- cases where it's not possible to infer, this information will need to be
- provided by the user when setting up or updating the subscription.
-
-## Maestro++ Service Changes
-
-When processing dependency updates for a subscription that has the new option
-enabled, the Maestro++ service will:
-
- 1. Create a branch in the internal target repository (internal AzDO mirror for
- GitHub repos, the base repo for Azure DevOps repos) based off the head of
- the target branch for the subscription in the format:
- `darc-flow--`
-
- 1. Apply the version updates to the `darc-flow` branch, In the case of batched
- subscriptions, the service will wait some time for the various updates to
- come in and apply them to the same branch.
-
- 1. Trigger and monitor an official build of the repository for the `darc-flow`
- branch.
-
- 1. If the build is successful, attempt a fast-forward only merge into the
- target branch. If successful:
- * Trigger the [Dnceng Build Promotion
- Pipeline](https://dnceng.visualstudio.com/internal/_build/results?buildId=550056&view=results)
- or the [DevDiv Build Promotion
- Pipeline](https://devdiv.visualstudio.com/DevDiv/_build?definitionId=12603&_a=summary)
- depending on which org hosts the official build pipeline for the repo to
- publish the build assets to the feeds and blob storage, and add the
- build to the target channel for the subscription. This will cause
- downstream dependency updates to trigger.
- * Once merged into the main repository, the commit into the internal
- mirror will end up triggering another official build, which will for all
- intents and purposes produce identical assets as the ones produced in
- step 4. This build will be tagged to indicate it's a duplicate, and will
- be cancelled and deleted from the build pipeline's history.
-
- 1. If the fast-forward merge fails:
- * Attempt to merge the version updates with a merge commit. The official
- build will be triggered by this commit once it gets mirrored to the
- internal repo, and this build will publish and be added to the channels
- automatically via the branch's default channel.
-
- 1. If there's a failure in the official build, or if there are merge conflicts
- when attempting to merge, the version updates will be re-applied against
- the HEAD of the target branch, and the existing flow model that opens Pull
- Requests in the target repo will kick in.
-
- 1. Clean up the `darc-flow` branch.
-
-**Note:** In cases where additional dependency updates targetting the same
-branch need to be processed after the official build has been triggered but
-before the commit has been merged into the taget branch, the new dependency
-update will be queued until the steps have finished for the current set of
-updates.
-
-## Timeline Ingestion Changes
-
-In order to avoid skewing the data used for the various reports, we will modify
-the Telemetry application so that builds that have been tagged by Maestro++ as
-duplicates are ignored and not ingested into Kusto.
-
-## Rollout and Validation Plan
-
-1. We will initially enable the functionality in the test repositories in the
- [maestro-auth-test](https://github.com/maestro-auth-test) org and will only
- be exercised by the Scenario tests that run on every check-in of the
- arcade-services repository.
-1. Enable it in the Arcade-services and Arcade-Validation repositories. These
- repos won't need any only take dependency updates from Arcade at a
- predictable cadence and have stable official builds. We should see the new
- flow working most of the time. This will require some work to make sure that
- only the build stages of the Arcade-Services pipeline run, as to not actually
- deploy anything during the official build of the `darc-flow` branches.
-1. Once we are comfortable with the results of the controlled testing, we can
- start enabling the functionality in product repos for the .NET 5 preview
- channels.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDependency%20Flow%5Cimproved-dependency-flow.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDependency%20Flow%5Cimproved-dependency-flow.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDependency%20Flow%5Cimproved-dependency-flow.md)
-
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dependency Flow/release-branch-history.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dependency Flow/release-branch-history.png differ
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev WF Actionable PRs/Capabilities and Data.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev WF Actionable PRs/Capabilities and Data.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev WF Actionable PRs/Capabilities and Data.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev WF Actionable PRs/Capabilities and Data.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,60 +0,0 @@
-# Capabilities and Data
-
-Taking as inputs the discussions had so far, the Checks Tab mock-ups Chad completed and the scoping done during our latest Gap-minding meeting I did some investigations and experiments to better understand the AzDO APIs, especially around tests. I'm happy to say it looks like it will be a good solution for augmenting data we already store in Kusto, solving problems of timeliness (e.g. Kusto ingestion delay) and scale (e.g. test results).
-
-Going through the meeting notes and the Checks Tab mocks, I organized the questions this system is planning to answer and where that information will be retrieved.
-
-Generally, the Checks Tab frames information in terms of
-
-- Build failures
-- Test failures
-
-Each type of failure showing information about
-
-- If unique
-- If seen previously
-- If happening now or recently on target (master) branch
-
-## Concerning current build failure information
-
-Run error information determined from aggregate of
-
-- AzDO Get Build API
-- AzDO Build Timeline API
-- AzDO Get Test Run API
-
-Answering questions or displaying information:
-
-- "\ [Test] [History] [Artifacts]"
-- "Exception message \"
-- "Callstack \"
-
-## Concerning Build Retry information
-
-Retry information is aggregated from
-
-- Current build failure information
-- Auto-retry driving telemetry in SQL "Known Failure" table
-
-Answering questions
-
-- "This test \ pass on retry"
-- Concerning Branch Status
-- "The target branch (master) \ failing" (AzDO "Get Build" API)
-
-## Concerning Historical build failure information
-
-- "This step first failed in master on \" (Kusto "Timeline*" tables)
-- "This step has failed \ out of \ runs in master, most recently on \" (Kusto "Timeline*" tables)
-
-## Concerning test history by branch, test pass/failure rate,
-
-- "There are test failures in this build for pipelines that are also failing in master" (AzDO "Query Test History" API)
-- "The test \ has failed out of runs" (AzDO "Query Test History" API)
-- "The test \ first failed on master at \ on \" (AzDO "Query Test History" API)
-- "The test \ latest failed on master at \ on \" (AzDO "Query Test History" API)
-- "The test \ was introduced on \" (AzDO "Query Test History" API)
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDev%20WF%20Actionable%20PRs%5CCapabilities%20and%20Data.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDev%20WF%20Actionable%20PRs%5CCapabilities%20and%20Data.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDev%20WF%20Actionable%20PRs%5CCapabilities%20and%20Data.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Failure Guessing - arcade5963.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Failure Guessing - arcade5963.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Failure Guessing - arcade5963.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Failure Guessing - arcade5963.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,74 +0,0 @@
-# Failure Guessing for Pull Requests
-
-We want to implement a mechanism that will trigger when a build fails and provides information (much like the [runfo](https://github.com/jaredpar/runfo/tree/master/runfo) tool operates) to the developer on the pull request that initiated the build, via a GitHub Check (if possible).
-
-The information relayed back to the developer should note the following:
-- link to an open, public GitHub issue if the failure appears to be known
-- that the failure encountered was novel
-
-Examples of failure notes:
-- `This test is also failing in the main branch` (link to open issue, if available)
-- `This is a known Mac disconnect issue` (link to open issue, if available)
-
-This functionality may provide comparable data that is already provided by the runfo tool. In other words, if there is functionality that already exists in runfo that does what we need to do, we should look at implementing similar functionality in our own project.
-
-## Stakeholders
-- Product teams' developers
-- Product teams' management
-- Engineering Services team's developers
-- Engineering Services team's management
-
-This feature is primarily for the product teams, however, the Engineering Services team should dogfood their own functionality.
-
-## Risks
-
-- Data could be misleading and provide incorrect assumptions on cause of failures. (Because this functionality is new, we may not be able to create algorithms strong enough to sufficiently raise the signal-to-noise ratio experienced by devs.)
-- Information relayed to the user may not contain actionable information (e.g. should they wait for other issues to be resolved? Do they need to fix a test? Is this something they need to report to Engineering Services team? et cetera)
-- Information relayed to the user may be noisy.
-- Ensure we are not hiding data that used to be visible
-
-### Proof of Concepts
-
-- Through a proof of concept, verify that we are able to build a service that will be able to accurately analyze the failure and provide relevant data (e.g. links to open GitHub issues regarding the failure) to the user regarding the failure.
-
- We will build a service that can take in an Azure DevOps build ID. This service will contain functionality that will detect failure patterns. Likely, this information will need to be provided by humans, so we will also need the ability to take in and store this information, such as in a JSON file. This should connect to a GitHub Checks API in order to report the failure analysis back to the user in the pull request.
-
- Additionally, we may want to build our POC as a command line tool initially, to ensure that the functionality works as expected before we integrate it with the infrastructure.
-
-- Through a proof of concept, verify that we are able to provide a mechanism for the repo owner to annotate their error messages so they can ignore non-useful error messages.
-
-### Dependencies
-
-- Helix Services
-- Azure DevOps
-- GitHub, GitHub Checks API
-- Helix
-
-## Serviceability
-
-- Unit and functional (where appropriate, for both) tests for the individual components of the service
-- Post-deployment scenario tests for the service to ensure that it is functioning as intended with all it's integrated parts.
-- Algorithms developed for this feature should all be unit tested in order to detect errors if the algorithm changes.
-- Ensure additional resources needed for this feature, such as additional data stores are well-documented for serviceability.
-
-### Rollout and Deployment
-
-- If the service is built in an existing project, such as Arcade Services or Helix Services, the rollout should be consistent with the existing rollouts for the project.
-
-## Usage Telemetry
-
-- Create a feedback mechanism (e.g. tracking image with a thumbs-up and thumbs-down) to include next to any communication so that the users can provide feedback quickly.
-- Links created in new areas (e.g. GitHub comments, Failure Summary in GitHub Checks) could pass-through an aka.ms redirect link for us to capture usage from.
-
-## Monitoring
-
-- This new service will require usage and health monitoring.
-
-## FR Hand off
-
-- We will create user documentation for how to configure the retries and expectations for viewing results.
-- We will create documentation for the Engineering Services team to investigate issues with the feature itself.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CFailure%20Guessing%20-%20arcade5963.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CFailure%20Guessing%20-%20arcade5963.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CFailure%20Guessing%20-%20arcade5963.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Known Issues - arcade5963.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Known Issues - arcade5963.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Known Issues - arcade5963.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Known Issues - arcade5963.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,68 +0,0 @@
-# Known Issues and Outage Reporting for Pull Requests
-
-We want to implement a notification that will be communicated to the developer on the pull request that initiated the build that notes any known outages that may cause or is causing failures. This will be similar to the Failure Guessing feature.
-
-Some of this data may already be available via [runfo](https://github.com/jaredpar/runfo/tree/master/runfo), so we should try to leverage the functionality that it has there, if it's useful for this feature. (e.g. recreating the functionality in our own code)
-
-Also, if possible, we'd like to automatically retry builds that have failed due to outages when the outage has been resolved.
-
-## Stakeholders
-- Product teams' developers
-- Product teams' management
-- Engineering Services team's developers
-- Engineering Services team's management
-
-This feature is primarily for the product teams, however, the Engineering Services team should dogfood their own functionality.
-
-## Risks
-
-- Data could be misleading and provide incorrect assumptions on cause of failures.
-- Information relayed to the user may not contain actionable information (e.g. should they wait for other issues to be resolved? Do they need to fix a test? Is this something they need to report to Engineering Services team? et cetera)
-- Information relayed to the user may be noisy.
-- It's possible that the amount of work required for this future will overlap greatly with the greater Outage epic that is on the backlog.
-- A manual process for tracking outages may mean issues that are tracking outages may be missed when the outage is resolved, or resolved prematurely.
-
-### Proof of Concepts
-
-- Through a proof of concept, verify that we are able to retry a build that had previously failed due to an outage. This will require us to track the builds that are affected by outages and then know when an outage has been resolved so that it can retry the failed builds.
-
- Additional things to consider:
- - Is a retry for the build already in progress?
- - Are there other things in the build that caused a failure that does not warrant a retry?
- - Should only builds of a certain "age" be retried? (e.g. what if a build is a week old before a reported outage is marked as resolved?)
- - Can a customer opt out of an automatic retry? (Would this make sense to have?)
-
-- Through a proof of concept, verify that we have a way of tracking outages that can be posted back to the pull request in GitHub via the GitHub Checks API to report this information back to the user. This may be a specific way of naming GitHub issues to track outages, or a service that will track the outages.
-
-### Dependencies
-
-- Helix Services
-- Azure DevOps
-- GitHub, GitHub Checks API
-
-## Serviceability
-
-- Unit and functional (where appropriate, for both) tests for the individual components of the service
-- Post-deployment scenario tests for the service to ensure that it is functioning as intended with all it's integrated parts.
-
-### Rollout and Deployment
-
-- If the service is built in an existing project, such as Arcade Services or Helix Services, the rollout should be consistent with the existing rollouts for the project.
-
-## Usage Telemetry
-
-- Create a feedback mechanism (e.g. tracking image with a thumbs-up and thumbs-down) to include next to any communication so that the users can provide feedback quickly.
-- Links created in new areas (e.g. GitHub comments, Failure Summary in GitHub Checks) could pass-through an aka.ms redirect link for us to capture usage from.
-
-## Monitoring
-
-- This new service will require usage and health monitoring.
-
-## FR Hand off
-
-- We will create user documentation for how to configure the retries and expectations for viewing results.
-- We will create documentation for the Engineering Services team to investigate issues with the feature itself.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CKnown%20Issues%20-%20arcade5963.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CKnown%20Issues%20-%20arcade5963.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CKnown%20Issues%20-%20arcade5963.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Passed On Rerun Data.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Passed On Rerun Data.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Passed On Rerun Data.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Passed On Rerun Data.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,60 +0,0 @@
-# Test passed on rerun data
-
-Analyzing tests that passed on rerun at a granular level, such as per build, sometimes isn't enough; you may want to see all the tests that passed on rerun at a glance.
-
-For the cases in which you want to see all the tests that passed on rerun in your build or repo or look for the most common failures on the test that passed after a rerun, and many other scenarios, you can do the following:
-
-1. Query the PassedOnRerun data using the [AzureDevOpsTests](https://dataexplorer.azure.com/clusters/engsrvprod/databases/engineeringdata?query=.show%20table%20AzureDevOpsTests) table, located in: [Engsrvprod/engineeringdata](https://dataexplorer.azure.com/clusters/engsrvprod/databases/engineeringdata)
-
-
- ```
- AzureDevOpsTests
- | where Outcome == "PassedOnRerun"
- | where BuildId == [buildId]
- | where Repository == [repository]
- ```
-
-1. Use [Build Analysis Reporting](https://msit.powerbi.com/links/crjYD5rwh0?ctid=72f988bf-86f1-41af-91ab-2d7cd011db47&pbi_source=linkShare) under `Test Passed on Rerun` tab to see aggregated data.
-
-
-## Example of queries
-
-1. Query by build Id:
-To get the buildId of your build you can navigate to your Azure DevOps build pipeline, see the URL, it should look like this:
-`https://dev.azure.com/dnceng/public/_build/results?buildId=1234567&view=results`
-Look for the `buildId=` in this case is `1234567` and use that id in your query.
-Ex.
- ```
- AzureDevOpsTests
- | where Outcome == "PassedOnRerun"
- | where BuildId == 1234567
- ```
-
-2. Query by repository*:
-In this example you can see all the assemblies in the last week for a repository, ordered by passed on rerun count
- ```
- AzureDevOpsTests
- | where RunCompleted > ago(7d)
- | where Outcome == "PassedOnRerun"
- | where Repository == "dotnet/runtime"
- | summarize count() by WorkItemFriendlyName
- | order by count_ desc
- ```
 - * Examples of repositories: 'dotnet/roslyn', 'dotnet/runtime', 'dotnet/aspnetcore', 'dotnet/installer'
-
-
-3. Query tests by assembly:
-In this example you can see all the tests in the last week for an assembly, ordered by passed on rerun count
- ```
- AzureDevOpsTests
- | where RunCompleted > ago(7d)
- | where Outcome == "PassedOnRerun"
- | where WorkItemFriendlyName == "System.Net.Http.Functional.Tests"
- | summarize count(), any(Arguments) by TestName, ArgumentHash
- | order by count_ desc
- ```
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CPassed%20On%20Rerun%20Data.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CPassed%20On%20Rerun%20Data.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CPassed%20On%20Rerun%20Data.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Builds - arcade5963.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Builds - arcade5963.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Builds - arcade5963.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Builds - arcade5963.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,59 +0,0 @@
-# Automatically Retry Builds For Pull Requests
-
-We want to implement a mechanism in which builds that fail due to a reason that is defined in a configuration by the repository owners are automatically retried during pull request builds.
-
-The retry event will be communicated to the developer via GitHub on the pull request that initiated the build.
-
-## Stakeholders
-- Product teams' developers
-- Product teams' management
-- Engineering Services team's developers
-- Engineering Services team's management
-
-This feature is primarily for the product teams, however, the Engineering Services team should dogfood their own functionality.
-
-## Risks
-
-- Increase of resource costs because of retries, however, this should not be much more than the current cost it takes to re-run a build when a failure happens.
-- Increase in time to investigate failures due to lag introduced by the retries.
-- Rule management may become cumbersome.
-- Low adoption-rate due to configuration requirements.
-
-### Proof of Concepts
-
-- Through a proof of concept, verify that we are able to retry a build when a retryable scenario (customer configured) is detected. See if we can leverage similar functionality that exists in Runfo today that scans the build logs when a failure occurs, and if the reason for failure matches any of the retryable scenarios, a retry is attempted on the build. This mechanism should be built as a separate service, and connected to the build processes in Azure DevOps via a webhook.
-
-- Through a proof of concept, verify that we are able to incorporate a mechanism for customers to quickly and easily provide feedback via some kind of tracking image. This mechanism must be able to work with markdown, and will provide: 1) a like or dislike of the feature; and 2) capture usage tracking information. This data should be captured in Application Insights so that we can query upon the data to see how it is being used and whether customers approve or disapprove of the feature.
-
-### Dependencies
-
-- Helix
-- Arcade
-- Azure DevOps
-- GitHub, GitHub Checks API
-
-## Serviceability
-
-- Tests (unit, functional, E2E) for retry functionality
- - A failed build for a retryable reason should be retried.
- - A failed build for a reason that was not configured to be retried should not be retried.
-- Tests/Validation for functionality we write to support the GitHub Check that will report on the retry status of a work item.
-
-### Rollout and Deployment
-
-- If all the functionality for this exists in Arcade, then the rollout for this functionality should only be impacted by Arcade promotions. We will need to ensure that there's sufficient testing (E2E) in Arcade Validation for this feature.
-- Initial configuration and set up in the repositories will likely need assistance from the Engineering Services team to get the customers up and running.
-
-## Usage Telemetry
-
-- Create a feedback mechanism (e.g. tracking image with a thumbs-up and thumbs-down) to include next to any retry communication so that the users can provide feedback quickly.
-- Links created in new areas (e.g. GitHub comments, Failure Summary in GitHub Checks) could pass-through an aka.ms redirect link for us to capture usage from.
-
-## FR Hand off
-
-- We will create user documentation for how to configure the retries and expectations for viewing results.
-- We will create documentation for the Engineering Services team to investigate issues with the feature itself.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CRetry%20Builds%20-%20arcade5963.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CRetry%20Builds%20-%20arcade5963.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CRetry%20Builds%20-%20arcade5963.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Tests - arcade5963.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Tests - arcade5963.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Tests - arcade5963.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Dev Workflow/Retry Tests - arcade5963.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,85 +0,0 @@
-# Automatically Retry Tests For Pull Requests
-
-We want to implement a mechanism in which work items that fail due to a reason that is defined in a configuration by the repository owners are automatically retried during pull request builds and tests.
-
-The work item can either be retried in the current environment, or, optionally, pushed to another machine to be retried.
-
-The retry will be communicated to the developer via GitHub on the pull request that initiated the test. We will ensure that failed attempts are not hidden from the user, and also communicated, as well. Full details of any failure shall be recorded.
-
-It is important to note that we can only retry based on a Helix Work Item since we are constrained by Azure DevOps and cannot retry on a per test basis.
-
-## Stakeholders
-- Product teams' developers
-- Product teams' management
-- Engineering Services team's developers
-- Engineering Services team's management
-
-This feature is primarily for the product teams, however, the Engineering Services team should dogfood their own functionality.
-
-## Risks
-
-- High implementation cost, however, the increase of green PRs due to automatically retrying failed tests would be a huge win.
-- Increase of resource costs because of retries, however, retries per work item is a lower cost than a retry of an entire build and test suite that users do today.
-- Increase in time to investigate failures due to lag introduced by the retries.
-- Rule management may become cumbersome.
-- Low adoption-rate due to configuration requirements.
-
-### Proof of Concepts
-
-- Through a proof of concept, verify that we are able to implement a retry mechanism in Arcade by enhancing the [Helix SDK scripts](https://github.com/dotnet/arcade/tree/main/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter) written in Python that will allow work items that fail and meet a certain criteria (defined in JSON) to be retried.
-
- Implementation details for this proof of concept are as follows:
- - Customer configures their desired retry rules in a pre-defined json file that will be provided in the /eng/ folder within Arcade.
- - When the build runs during the PR, if a work item fails that matches the rules defined in the json file, we will automatically retry that work item.
- - When the retry is initiated, a comment will be posted on the pull request that initiated the build/test suite remarking the failure and subsequent retry event. This information should also be communicated to the Failure Summary page through GitHub Checks.
- - The test will be retried the specified number of times as noted in the configuration, depending on subsequent failures.
-
-- Through a proof of concept, verify that we are able to incorporate a mechanism for customers to quickly and easily provide feedback via some kind of tracking image. This mechanism must be able to work with markdown, and will provide: 1) a like or dislike of the feature; and 2) capture usage tracking information. This data should be captured in Application Insights so that we can query upon the data to see how it is being used and whether customers approve or disapprove of the feature.
-
-### Dependencies
-
-- Helix
-- Arcade
-- Azure DevOps
-- GitHub, GitHub Checks API
-
-## Serviceability
-
-- Tests (unit, functional, E2E) for Python components in Arcade that will interact with Helix. (Provided the aforementioned proof of concept is acceptable, or for whatever mechanism is eventually written to support this functionality)
- - Tests for retry functionality on same machine
- - Tests for retry functionality sent to another machine
-- Tests/Validation for functionality we write to support the GitHub Check that will report on the retry status of a work item.
-- SDL considerations will need to be made for sentiment tracking.
-
-### Rollout and Deployment
-
-- If all the functionality for this exists in Arcade, then the rollout for this functionality should only be impacted by Arcade promotions. We will need to ensure that there's sufficient testing (E2E) in Arcade Validation for this feature.
-- Initial configuration and set up in the repositories will likely need assistance from the Engineering Services team to get the customers up and running.
-
-## Usage Telemetry
-
-- Create a feedback mechanism (e.g. tracking image with a thumbs-up and thumbs-down) to include next to any retry communication so that the users can provide feedback quickly.
-- Links created in new areas (e.g. GitHub comments, Failure Summary in GitHub Checks) could pass-through an aka.ms redirect link for us to capture usage from.
-- Usage telemetry:
- - Did the retry result in a passing test?
- - What triggered the retry?
- - Which work item did it trigger on?
- - How many retries were triggered for this work item?
-
-## Monitoring
-
-- The sentiment feature will need monitoring (e.g. sudden and/or excessive negative feedback may indicate something is wrong with a feature and is getting negative feedback from customers).
-- The retry functionality will need monitoring.
- - Does this increase CPU or resource usage?
- - Get a baseline of resource usage today before feature is implemented.
- - Other cost-related monitoring
- - Detect if the functionality is down/broken (e.g. a retry should've been triggered, but it was not)
-
-## FR Hand off
-
-- We will create user documentation for how to configure the retries and expectations for viewing results.
-- We will create documentation for the Engineering Services team to investigate issues with the feature itself.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CRetry%20Tests%20-%20arcade5963.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CRetry%20Tests%20-%20arcade5963.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CDev%20Workflow%5CRetry%20Tests%20-%20arcade5963.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/docker-image-usage-improvements.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/docker-image-usage-improvements.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/docker-image-usage-improvements.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/docker-image-usage-improvements.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,76 +0,0 @@
-# Background #
-Several of the .NET Core repositories utilize Docker images within their official build definitions for the supported Linux distros. Utilizing Docker allows a single Linux configuration to be used on all of the build agents yet the product can be built on all of the supported distros. Using Docker also allows the product teams to easily manage the build prereqs because they are specified within the Dockerfiles and don't require VSO service engineers in order to change them.
-
-The following is a summary of the prescribed process currently being used for adding/updating the Docker images.
-
-1. Add a new or update the existing [DockerFiles](https://devdiv.visualstudio.com/DevDiv/_git/DotNetCore?path=%2Fdockerfiles&version=GBmaster&_a=contents) as necessary.
-2. Build the new/modified Dockerfiles locally. Tag the image using the following naming schema `_prereqs_v` (e.g. `ubuntu1610_prereqs_v3`)
-3. Verify the new/modified image works as expected.
-4. Get someone with the appropriate access to push the new/modified image to [Docker Hub](https://hub.docker.com/r/chcosta/dotnetcore/).
-5. Update the appropriate build pipelines to reference the new Docker images (e.g. [CoreFx build pipeline](https://github.com/dotnet/corefx/blob/94780d59037393369d22def54466b2e13d81c435/buildpipeline/pipeline.json))
-
-# Problems #
-
-## Out of Date or Missing Dockerfiles ##
-Some of the images we are currently using within our build definitions do not have the corresponding Dockerfiles they were generated from checked into SCC.
-
-1. Some images were created by running a base OS image and then manipulating it as necessary in order to add the required tools and dependencies. The resulting images were then captured using the [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) command.
-2. Some images were created via Dockerfiles that were never checked into SCC.
-3. Some Dockerfiles were updated to produce newer versions of images but the updated Dockerfiles were never checked into SCC.
-
-Because we do not have the Dockerfiles for the images we are building with today, we don't really know with certainty what dependencies and toolsets we are building the product with. You can gather this information by inspecting the images we are building with but it is a time intensive process and prone to oversight. More importantly it is possible for the images on Docker Hub to be accidentally replaced or deleted. If this were to happen we would be in a very bad situation and would have to scramble to get the Docker images recreated.
-
-## Docker Image Versioning ##
-Nothing in the process used today enforces versioning of the Docker images. As described earlier, it is up to the individuals that have access to the Docker Hub repository to increment the Docker image version whenever an update is made. Hopefully this is being followed but it is susceptible to human judgement and error.
-
-If this process is not being followed, it is possible that updating an image without incrementing the version (e.g. changing the tag) would break the various builds that utilize the shared images. This issue may not surface itself immediately. For example it may only get surfaced at the time a previous release is serviced. Tracking down issues like this can be very time consuming and wasteful.
-
-## Docker Image Traceability ##
-There is no way to trace back from a Docker image to the Dockerfile it was generated with. For example, suppose we release version 1.0 of a product that was built with version 4 of a particular Docker image. A couple months pass and a service patch is needed for the release. For this particular service fix, a new version of a tool is needed. How do we correlate a Docker image back to the specific version of the Dockerfile it was generated from in order to update the required tool? There isn't a way to correlate a Docker image tag/version to a specific Dockerfile without manually inspecting what is installed on the image in relation to the file history of the Dockerfile.
-
-Another problem that has surfaced with our Dockerfiles is that they do not reference a static OS base image. The base OS images do get revised over time with service patches. This has caused issues such as [dotnet/core-setup#1149](https://github.com/dotnet/core-setup/pull/1149). We should be explicitly making these changes and verifying the changes prior to rolling them into production.
-
-## Docker Hub Repository ##
-Currently the Docker images being used are stored in Chris Costa's personal [Docker Hub repository](https://hub.docker.com/r/chcosta/dotnetcore/). Relying on personally owned artifacts is not a good practice to use. Chris could leave the team, company or worse, which may lead to issues in administering these artifacts.
-
-## Docker Toolset ##
-The Docker toolset being used by the builds is not captured anywhere. Docker may introduce breaking changes or behavior changes over time that could be detrimental if introduced onto the build agents. Docker has been known to do this in the past (e.g. [Regression in LTTng behavior on Docker 1.10.2](https://github.com/docker/docker/issues/20818)). When the product is built, it should be using an explicit version of the Docker toolset so that we can ensure repeatability and reliability.
-
-# Proposed Changes #
-
-## Automated Builds ##
-Introducing automated builds would be a great way to ensure we will always have the source Dockerfiles for the Docker images we use. The only way to update a Docker image would be to update the corresponding Dockerfile. When a Dockerfile change is merged in, a build would get kicked off automatically that would build the Docker image and upload it to Docker Hub. The build definition would use a service account to upload images to Docker Hub. This service account would be the only account that would have access to upload images. Therefore the only way to update the Docker images would be to make a change to the checked-in Dockerfiles.
-
-VSTF build definitions would provide the necessary functionality and flexibility to meet our requirements. Key vault should be utilized to store the credentials so that they are stored in a centralized place for servicing. If needed in a pinch, the credentials could be used manually to upload an image.
-
-## Tagging Scheme ##
-In order to provide traceability between a Docker image tag and the Dockerfile it was produced with, we should utilize a tagging scheme similar to the following:
-
-`.---`
-
-**Examples**
-
-- `opensuse.42.1-20170118-c760fcc`
-- `Ubuntu.14.04-crossbuild-20161210-b04b497`
-
-The automated builds would be capable of generating the tag from the Dockerfile location in SCC and the commit that triggered the build. A timestamp is suggested in addition to a commit sha simply as a means to quickly tell how old an image is and compare two tags in order to tell which one is older.
-
-## Docker Repository ##
-A new Docker repository should be used for our Docker images that is owned by an organization (e.g. `microsoft`). This will ensure that the dotnet organization will always have control over the repository as team members come and go. If we continue to use Docker Hub, we don't want a repository that could distract users from the official .NET Docker repository (e.g. `microsoft/dotnet`). Something like `microsoft/dotnet-buildtools-prereqs` could do that. If we wanted to obfuscate it more in order to avoid having the general public find it when looking for the real dotnet images, we could name it `microsoft/dnbpr` which would stand for `dotnet build prereqs`. This becomes a little unnatural. Couple this naming issue with the desire we have to be able to build the product without taking a dependency on non-MSFT services, we should use a private Docker registry. Azure currently has beta support for a [container registry](https://azure.microsoft.com/en-us/services/container-registry/) which would meet our needs.
-
-Using a custom Docker registry is pretty easy. You don't use the Azure CLI, you still use the Docker CLI. The differences are that first you must explicitly login to the private registry (`docker login dotnetcore-microsoft.azurecr.io -u *** -p ***`). Second you must include the registry name in all of the image requests such as run, pull, etc. (`docker pull dotnetcore-microsoft.azurecr.io/build_prereqs:ubuntu.14.04`)
-
-## Reference Base Images via Digest ##
-In order to solve the issues with the base images changing overtime due to service fixes, we should be [referencing the base images via digest](https://docs.docker.com/engine/reference/builder/#/from). This change makes the Dockerfile less readable by itself therefore it is recommended that we add a comment that clearly states what the base image is and the date it was produced.
-
-## Versioned Docker Toolset ##
-The solution for capturing the Docker toolset required by our builds and the mechanisms used to automatically acquire them are being covered by the work Matt Mitchell (Versionable Environments) and Ravi Eda ([Cmake versioning](https://github.com/dotnet/arcade/blob/main/Documentation/Project-Docs/cmake-scenarios.md)) are defining. It is sufficient for the scope of this document to say that whatever pattern comes out of this work should be applied to the Docker toolset.
-
-## Move Dockerfiles to Open ##
-Work has been going on recently to check-in the build definitions into the product repositories (e.g. [corefx](https://github.com/dotnet/corefx/tree/master/buildpipeline)). These build definitions reference our Docker images. Because of this, it would be beneficial to move the Dockerfiles from the [private repository](https://devdiv.visualstudio.com/DevDiv/_git/DotNetCore?path=%2Fdockerfiles&version=GBmaster&_a=contents) into the open. There are no trade secrets and they could be useful for others to see. A natural place to put these shared Dockerfiles would be within the [buildtools repo](https://github.com/dotnet/buildtools).
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cdocker-image-usage-improvements.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cdocker-image-usage-improvements.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cdocker-image-usage-improvements.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/fetch-internal-tooling.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/fetch-internal-tooling.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/fetch-internal-tooling.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/fetch-internal-tooling.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,35 +0,0 @@
-# Fetch Optional (Internal) Tooling
-
-This is an implementation plan for how to fetch sensitive internal tools during a .NET Core build.
-
-## Uploading a new tool
-
-The tool is put in a NuGet package and uploaded to a VSTS feed. VSTS feeds require authentication for any operation, and are secure.
-
-## Fetching during the build
-
-**To fetch internal tooling in your local dev build, see the [Running CoreFx tests on UAP (CoreCLR scenario) OneNote page
-](https://microsoft.sharepoint.com/teams/netfx/corefx/_layouts/OneNote.aspx?id=%2Fteams%2Fnetfx%2Fcorefx%2FDocuments%2FCoreFx%20Notes&wd=target%28Engineering%2FNet%20Standard%202.0.one%7CD8792BD0-63D5-4D0F-8EF0-B0F8444F49CD%2FRunning%20CoreFx%20tests%20on%20UAP%20%28CoreCLR%20scenario%5C%29%7C48A101A6-5621-4131-A49C-DA95C155D126%2F%29)**
-
-An `optional-tool-runtime/project.json` file in BuildTools specifies all required tooling that is only available from the internal VSTS feed. This is similar to [`tool-runtime/project.json`](https://github.com/dotnet/buildtools/blob/6a1400e631a097587246e973973e9fafe7ab6254/src/Microsoft.DotNet.Build.Tasks/PackageFiles/tool-runtime/project.json).
-
-In the official build, three properties are set for the `sync` call:
-
-```
-OptionalToolSource=https://devdiv.pkgs.visualstudio.com/_packaging/dotnet-core-internal-tooling/nuget/v3/index.json
-OptionalToolSourceUser=dn-bot
-OptionalToolSourcePassword=******
-```
-
-A target in BuildTools runs before the main project package restore, detects that these properties are set, then restores `optional-tool-runtime/project.json` into the `packages` directory. Build steps that need an optional tool can find it using `PrereleaseResolveNuGetPackageAssets`.
-
-The path to the project file can be overridden to specify repo-specific tooling, like in CoreFX: [dir.props#L303](https://github.com/dotnet/corefx/blob/30a0f7f753162b89ad110b4beba3fdeda434fe8c/dir.props#L303), [optional.json](https://github.com/dotnet/corefx/blob/30a0f7f753162b89ad110b4beba3fdeda434fe8c/external/test-runtime/optional.json).
-
-Devs who have the optional tooling packages but don't have convenient access to the VSTS feed can set `OptionalToolSource` to a directory to use it as an optional tool package feed.
-
-If `OptionalToolSource` isn't set, no optional tooling is restored.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cfetch-internal-tooling.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cfetch-internal-tooling.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cfetch-internal-tooling.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/helix-metrics.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/helix-metrics.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/helix-metrics.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/helix-metrics.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,124 +0,0 @@
-# Metrics
-
-## Overview
-We want to unify our reporting infrastructure so that we have a centralized way to report on metrics related to the health and behavior of the helix services.
-We'll have a single place for defining all the interesting information about a metric (its names, which services it applies to, levels it should be alerted at...),
-from which we'll generate all the pieces we need.
- * Strongly typed wrappers to send metrics
- * Automatically deployed alerting rules
- * Visualizations
-These metrics will either be sent by the services themselves, or an external monitoring service.
-
-## Unified File
-We will have a centralized file to define all our metrics, something similar to
-``` YAML
-services:
- - missionControl
- - helixApi
-instances:
- - helix
- - missionControl
- - helixClient
-metrics:
- -
- name: DataMigrationQueueDepth
- instance: helix
- servicesAffected:
- - missionControl
- - helixApi
- warning: >100
- error: >1000
- alertIfMissing: true
-
-```
-This will define the services we monitor, the application insights instances that we are reporting to/from, and then a list of all metrics that
-we'll report and alert on.
-
-## Code Generation
-From the unified file, we'll run some pre-processing code to generate C# (and potentially other language) wrappers for reporting,
-``` C#
-public class HelixMetric
-{
- public void Track(double value);
-}
-
-public class HelixReportingProvider
-{
- public HelixMetric DataMigrationQueueDepth { get; }
-}
-
-...
-
-private HelixReportingProvider _reporting; // DI this
-
-public void SomewhereElse()
-{
- _reporting.DataMigrationQueueDepth.Track(900);
-}
-```
-
-We'll handle pre-aggregation based on recommendations
-[here](https://docs.microsoft.com/en-us/azure/application-insights/app-insights-api-custom-events-metrics#trackmetric).
-The metrics/events will be reporting to AI as customMetrics and customEvents
-If we need wrappers for other languages, we can work on that.
-
-The wrappers will be generated at build time, so there won't be a need to check them in, hopefully.
-
-## Monitoring Service
-Most monitoring can be handled by the services themselves, for example, when pulling an item from an Azure queue, it's trivial
-to report on the age of the item at that point (since it's part of the pulled item).
-
-However, sometimes this isn't true. For example, reporting on the queue _depth_ isn't something that's easily handled at pull time,
-and it shouldn't be done by every pulling instance, since it would just cause noise (there is only one queue depth to report).
-These will be handled by a separate monitoring service that runs and monitors external factors, like queue depth,
-or reachability of storage accounts, or such things.
-
-## Reporting
-We're going to use Power BI to start and see if that's a good fit for our alerting. Hopefully we can make it data
-driven as well, so that it can pull from our source file with some transformation.
-
-If Power BI doesn't work out, we'll use custom graphing in Mission Control, pulling from AI
-
-## Outage Service
-Knowing a metric is off is great for our ability to run our service, but an important part of a service is the ability
-to report out to users about downtime and outages.
-
-To this end, we need to create a fairly simple outage reporting service. It doesn't need to be much more than
-a table that lists services, any outages, and any notes that we have made about its status.
-
-There should be an API listening here that AI alerts can report to. When that happens, we should open
-some sort of ticket (probably a github issue) so that it can be tracked/resolved.
-
-This won't consist of more than a fairly simple Azure Table, and a handful of webpages (overall status,
-report new outage, resolve existing outage).
-
-## Alerting
-Application Insights already has fairly good alerting based on custom metrics, which we will take advantage of.
-Using the single file, a resource template will be created, as shown
-[here](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-enable-alerts-using-template).
-It will both email the outage to an alias (maybe dnceng) and notify the outage service so it can open an issue
-and possible mark an outage.
-
-All of these alerts will alert to the same email address and also report to an alerting service.
-
-## What to Measure
-In short: whatever it's feasible to measure. Having a metric reporting should be very low cost to the service,
-as long as it's not in a very tight inner loop, so over-reporting should be preferred to under-reporting.
-
-Some examples of things that we should measure ...
- * ... in any sort of producer/consumer model ...
- * ... the average production and consumption rates
- * ... the current depth of the backlog
- * ... the average delay between production and consumption
- * ... in any user exposed service ...
 - * ... availability
- * ... average response times for a fixed, known piece of work
 - * ... in a distributed service ...
- * ... heartbeats inside the code that's doing work
- * ... of any external service we depend on
- * ... availability query (can we contact the service with the credentials we expect)
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Chelix-metrics.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Chelix-metrics.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Chelix-metrics.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/instructions.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/instructions.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/instructions.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/instructions.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,88 +0,0 @@
-# How to use Helix test log search
-
-This functionality is accessible through an endpoint in Helix Services at `/logs/search`
-
-## Input
-
-All of the following are required parameters:
-- `repository`: The public repository whose test logs will be parsed. The program will only search for non-internal test logs.
-- `searchString`: The error string the program will search for
-- `startDate`: The start date of the date range in which the test logs will be searched. This argument must be in the format yyyy-MM-dd. For example, 2022-05-10. The date must be within the last 14 days.
-- `endDate`: The end date of the date range in which the test logs will be searched. This argument must be in the format yyyy-MM-dd. For example, 2022-05-10. The date must be within the last 14 days.
-- `responseType`: This argument is one of "Hits" or "HitsPerFile". The string must exactly match. This determines the type of response returned (sample responses can be found in the Outputs section below).
-
-Note that the `repository` and `searchString` values should be URL encoded when they are passed in through the browser.
-
-Example:
-
- http://localhost:8080/logs/search/dotnet%2Fruntime/Remote%20process%20failed%20with%20an%20unhandled%20exception./2022-07-14/2022-07-21/HitsPerFile/
-
-## Output
-
-### Hits
-Here is sample output for a `Hits` response. Each item in the hits array corresponds to a string match in a log file.
-
- {
- "filter": {
- "repository": "dotnet/runtime",
- "errorString": "Microsoft.DotNet.RemoteExecutor.RemoteExecutionException : Remote process failed with an unhandled exception.",
- "startDate": "2022-07-15T07:00:00Z",
- "endDate": "2022-07-21T07:00:00Z",
- "responseType": "Hits"
- },
- "hits": [
- {
- "lineContent": "Microsoft.DotNet.RemoteExecutor.RemoteExecutionException : Remote process failed with an unhandled exception.",
- "lineNumber": 29,
- "jobId": 20180433,
- "friendlyName": "System.Net.Security.Tests",
- "status": "Fail",
- "started": "2022-07-15T12:00:20.807Z",
- "finished": "2022-07-15T12:01:18.036Z",
- "consoleUri": "https://helixre107v0xdeko0k025g8.blob.core.windows.net/dotnet-runtime-refs-heads-release-50-0a95b79667f444c8ac/System.Net.Security.Tests/1/console.a9847084.log?helixlogtype=result",
- "queueName": "osx.1015.amd64.open",
- "attempt": 1
- },
- ...
- ],
- "occurrenceCount": 4,
- "filesCount": 4
- }
-
-
-### HitsPerFile
-Here is sample output for a `HitsPerFile` response. Each item in the hits array corresponds to one log file.
-
- {
- "filter": {
- "repository": "dotnet/runtime",
- "errorString": "Microsoft.DotNet.RemoteExecutor.RemoteExecutionException : Remote process failed with an unhandled exception.",
- "startDate": "2022-07-15T07:00:00Z",
- "endDate": "2022-07-21T07:00:00Z",
- "responseType": "HitsPerFile"
- },
- "hits": [
- {
- "occurrences": 1,
- "jobId": 20214225,
- "friendlyName": "System.Net.Security.Tests",
- "status": "Fail",
- "started": "2022-07-20T12:04:48.094Z",
- "finished": "2022-07-20T12:05:47.138Z",
- "consoleUri": "https://helixre107v0xdeko0k025g8.blob.core.windows.net/dotnet-runtime-refs-heads-release-50-db3c3d858764452e9d/System.Net.Security.Tests/1/console.57a6404e.log?helixlogtype=result",
- "queueName": "osx.1015.amd64.open",
- "attempt": 1
- },
- ...
- ],
- "occurrenceCount": 4,
- "filesCount": 4
- }
-
-
-## Other notes
-- There is a timeout of 120 seconds/2 minutes on the whole program. If the query and parsing time out, the program throws an error and will not return any data.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cinstructions.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cinstructions.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cinstructions.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/one-pager.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/one-pager.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/one-pager.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/one-pager.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,144 +0,0 @@
-# ✔️ Introduction
-## Requirements
-We want to create a REST API that allows users to find the frequency of a specific error string in a repository's Helix test logs. So we want to be able to query an endpoint with a `repository`, `error_string`, `start_date`, and `end_date` to get a list of all the error occurrences of the string in the build logs matching those arguments.
-
-# 🖥️ Implementation
-## High-level diagram
-
-
-## Logic
-1. Take and sanitize user input
-2. Execute Kusto query (filter by repository, date_ranges, fail status)
-3. Iterate through Kusto's results to retrieve log files' path in Azure Storage
-4. Read log file's content line by line using file stream
-5. Use string searching/matching to find error string in the lines.
-6. If match is found, keep track of it and increment occurrences found.
-7. Return the results found as a JSON object.
-
-### Kusto query
- Jobs
- | where Repository == REPO_NAME
- | project JobId, IsExternal
- | join kind = inner WorkItems on JobId
- | project JobId, FriendlyName, Status, Started, Finished, ConsoleUri, QueueName, Attempt, IsExternal
- | where Status == 'Fail'
- | where Started between(START_DATE .. END_DATE) | where IsExternal == 1";
-
-## Input
-- Arguments
- - `repository`
- - `error_string`
- - `start_date`
- - `end_date`
-
-- Constraints
- - `repository` must be an existing, public repository. Its spelling must match the repo name exactly.
- - `error_string` should probably have some kind of limit on length.
- - The duration between `start_date` and `end_date` should have a maximum of 7 days. If user input exceeds this value, one possible way of handling this is to just query jobs between our defined max number of days before the given `end_date` and alert the user that this was done instead of their original query.
-
-## Dependencies
-- Kusto
-- Azure Account Storage
-
-## String Matching
-The three possible string matching methods ranked by speed/performance are:
-1. C# `String.contains`, `String.replace`, etc
-2. Boyer-Moore string searching algorithm
-3. Regex
-
-This ranking is based on the following articles:
-
-[Boyer-Moore VS String.contains](http://www.blackbeltcoder.com/Articles/algorithms/fast-text-search-with-boyer-moore)
-
-**TLDR;** Although Boyer-Moore is considered one of the fastest string-matching algorithms, C#'s `String.contains` method is faster as it uses assembly optimization. Although we might need a performance test since we will need to go line by line and load the strings from each log file if we use `String.contains` and that might take even longer.
-
-[String.contains VS Regex.isMatch](https://theburningmonk.com/2012/05/performance-test-string-contains-vs-string-indexof-vs-regex-ismatch/#:~:text=As%20you%20can%20see%2C%20Regex.IsMatch%20is%20by%20far,turned%20out%20to%20be%20significantly%20faster%20than%20String.IndexOf.)
-
-**TLDR;** Regex matching is way slower than String methods. It's only more useful if we want to pattern match as opposed to finding a fixed string. (Actually this raises the question - do we want to pattern match?)
-
-Also, this article [Fastest Ways to Count Substring Occurences in C#](https://cc.davelozinski.com/c-sharp/c-net-fastest-way-count-substring-occurrences-string) compares the speeds of different methods of counting substring occurrences.
-
-**TLDR;** Using BCL was the fastest method for the following performance tests:
-> Counting the number of times 1 string occurs in 5,000, 25,000, 100,000, and 1,000,000 million strings.
->
-> Counting the number of times 100 strings occur in 5,000, 25,000, 100,000, and 1,000,000 million strings.
->
-> Counting the number of times 1,000 strings occur in 5,000, 25,000, 100,000, and 1,000,000 million strings.
-
-It also corroborates the article saying Regex matching is very slow for long strings.
-
-**❕ Decision is to use `String.Contains` (BCL) for now and stick with fixed string matching. Notes were made in additional features section to possibly include pattern matching down the road.**
-
-## File Reading
-Since we will potentially need to be reading text from thousands of files, it's worth taking a look at fastest ways to read file input. The following article benchmarks the time it takes for different ways of reading file input.
-
-[Fastest Ways to Read Text Files in C#](https://cc.davelozinski.com/c-sharp/fastest-way-to-read-text-files)
-
-**TLDR;** There was no one fastest method found, but in general, reading line by line and storing each line into a string was fast, and should be sufficient for this program. We can also make it faster using parallel threads if needed.
-
-**❕ We want to read different log files async using some version of `Task.WhenAll` to read the files concurrently.**
-
-## Output
-#### Possible JSON output:
- {
- "filter": {
- "repository": "...",
- "error_string": "",
- "start_date": "",
- "end_date": "",
- "num_hits": 00,
- },
- "hits":
- [
- {
- "document_uri": "uri to document",
- "job_id": "helix guid"
- "friendly_name": "",
- "started": "",
- "finished": "",
- "queue_name": "",
- },
- {
- "document_uri": "uri to document",
- "job_id": "helix guid"
- "friendly_name": "",
- "started": "",
- "finished": "",
- "queue_name": "",
- },
- ...
- ]
- }
-
-# 👓 Proof-of-Concept
-The plan for now is reading log files line by line and using `String.Contains`. We also want to use `Tasks` to parse each log file in parallel. Currently, I’m taking the following steps to implement POC:
-
-1. Write code for parsing a file using a hardcoded URI and getting in the data we want to return
-2. Replace the hardcoded URI with the actual URIs retrieved from a Kusto Query and looping through multiple URIs (and eventually the thousands that are actually returned).
-3. Deploy the POC so that we can run it on the same data centre that the logs are stored so we can see the actual speed of the program
-
-- Will test out string matching on a fixed number of log files first to see the speed on a local machine (and we also want to see the speed of actually running it on servers)
-
-# 📓 Additional Notes
-
-### Possible additional features
-- Include line number and character index that a string match was found
-- Allow user to pass either/or these 2 options as arguments:
- - `repository`, `error_string`, `date_range`
- - `build_id` list
-- Taking an optional parameter for context lines (e.g also return the 5 lines surrounding the hit line - think GDB)
-- Allowing pattern matching using regex (currently only allow for fixed string matching)
-- Allow user to pass token to authenticate and allow search in non-external jobs
-- Include retries in the output (`Attempt` and `LocalIteration` columns from `Files` table)
-
-### Issues/questions to look into down the road
-- Possibly use a profiler (like VS profiler) to look more into performance
-- Eventually we want to deploy to use the same data centres as the logs in Azure
-- Look more into handling failure cases like limiting user input i.e only 1 outstanding request allowed per person also “(limiting the input sizes, like only X total days, or Y total logs to scan), returning a partial result if we run out of time, a stateful server request, where you could ask "hey, I started this query a bit ago, do you have the answer yet"... Lots of exciting options!
-- Keep in mind the constraints for date range input - for now we are using a 7 day max duration but this can be changed if it is actually faster than expected
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cone-pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cone-pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cone-pager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/poc-summary.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/poc-summary.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/poc-summary.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/poc-summary.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,141 +0,0 @@
-# Helix Test Log Search POC Summary
-To recall, this POC queries Kusto for failed test logs for a specified repository and date range, and parses these logs line by line for a given string.
-
-In general, since we are limiting the number of days a user can search for a string, depending on the number of files that are returned by the Kusto query, the program will run for anywhere between 20s and 1m30s.
-
-## 📋 How to use
-The program requires all arguments to be passed when running it in command line:
-
- dotnet run --errorString={SEARCH_STRING} --repository={REPOSITORY_NAME} --startDate={STARTING_DATE_RANGE} --endDate={ENDING_DATE_RANGE} --mode={RESULT_TYPE}
-
-See the Input section below for more details on input constraints.
-
-
-### Input
-Run the console app in terminal and pass all of the following options:
-
-- `errorString`: The error string the program will search for
-- `repository`: The public repository whose test logs will be parsed
-- `startDate`: The start date of the date range in which the test logs will be searched. This argument must be in the format **yyyy/MM/dd** and is in UTC.
-- `endDate`: The end date of the date range in which the test logs will be searched. This argument must be in the format **yyyy/MM/dd** and is in UTC.
-- `mode`: Either `"Hits"` or `"HitsPerFile"`. Hits will return an array of all occurrences of `errorString`, including each occurrence's line number within its test log file. HitsPerFile will return an array of hits per file, including the count of occurrences per file.
-
-#### Example
-
- dotnet run --errorString="UseCallback_BadCertificate_ExpectedPolicyErrors" --repository=dotnet/runtime --startDate=2022-06-01 --endDate=2022-06-07 --mode=HitsPerFile
-
-This command will search for all instances of the string "UseCallback_BadCertificate_ExpectedPolicyErrors" in all the `dotnet/runtime` Helix test logs created between June 1, 2022 and June 7, 2022. It will return the number of hits found per file.
-
-### Output
-
-Here is some sample output showing the 2 different response types.
-
-#### Hits
- {
- "filter": {
- "repository": "dotnet/runtime",
- "errorString": "UseCallback_BadCertificate_ExpectedPolicyErrors",
- "startDate": "2022-05-25T07:00:00Z",
- "endDate": "2022-06-07T07:00:00Z"
- },
- "hits": [
- {
- "lineContent": "System.Net.Http.Functional.Tests.SocketsHttpHandler_HttpClientHandler_ServerCertificates_Test.UseCallback_BadCertificate_ExpectedPolicyErrors(url: \"https://wrong.host.badssl.com/\", expectedErrors: RemoteCertificateNameMismatch) [FAIL]",
- "lineNumber": 129,
- "jobId": 19894144,
- "friendlyName": "System.Net.Http.Functional.Tests",
- "status": "Fail",
- "started": "2022-06-01T12:36:28.573Z",
- "finished": "2022-06-01T12:42:46.786Z",
- "consoleUri": "https://helixre107v0xdeko0k025g8.blob.core.windows.net/dotnet-runtime-refs-heads-release-50-159e0decb4474dbfbb/System.Net.Http.Functional.Tests/1/console.52339d8a.log?helixlogtype=result",
- "queueName": "ubuntu.1804.armarch.open",
- "attempt": 1
- },
- {
- "lineContent": "/_/src/libraries/Common/tests/System/Net/Http/HttpClientHandlerTest.ServerCertificates.cs(334,0): at System.Net.Http.Functional.Tests.HttpClientHandler_ServerCertificates_Test.UseCallback_BadCertificate_ExpectedPolicyErrors_Helper(String url, String useHttp2String, SslPolicyErrors expectedErrors)",
- "lineNumber": 142,
- "jobId": 19894144,
- "friendlyName": "System.Net.Http.Functional.Tests",
- "status": "Fail",
- "started": "2022-06-01T12:36:28.573Z",
- "finished": "2022-06-01T12:42:46.786Z",
- "consoleUri": "https://helixre107v0xdeko0k025g8.blob.core.windows.net/dotnet-runtime-refs-heads-release-50-159e0decb4474dbfbb/System.Net.Http.Functional.Tests/1/console.52339d8a.log?helixlogtype=result",
- "queueName": "ubuntu.1804.armarch.open",
- "attempt": 1
- },
- ...
- ],
- "occurrenceCount": 96,
- "filesCount": 24
- }
-
-#### Hits per File
- {
- "filter": {
- "repository": "dotnet/runtime",
- "errorString": "UseCallback_BadCertificate_ExpectedPolicyErrors",
- "startDate": "2022-05-25T07:00:00Z",
- "endDate": "2022-06-07T07:00:00Z"
- },
- "hits": [
- {
- "occurrences": 4,
- "jobId": 19902082,
- "friendlyName": "System.Net.Http.Functional.Tests",
- "status": "Fail",
- "started": "2022-06-02T12:16:47.584Z",
- "finished": "2022-06-02T12:21:47.023Z",
- "consoleUri": "https://helixre107v0xdeko0k025g8.blob.core.windows.net/dotnet-runtime-refs-heads-release-50-ab8bca7d45fc4cd0bb/System.Net.Http.Functional.Tests/1/console.e36c2801.log?helixlogtype=result",
- "queueName": "ubuntu.1804.armarch.open",
- "attempt": 1
- },
- {
- "occurrences": 4,
- "jobId": 19909540,
- "friendlyName": "System.Net.Http.Functional.Tests",
- "status": "Fail",
- "started": "2022-06-03T13:37:32.456Z",
- "finished": "2022-06-03T13:43:35.135Z",
- "consoleUri": "https://helixre107v0xdeko0k025g8.blob.core.windows.net/dotnet-runtime-refs-heads-release-50-e3fe329caffc4ab3be/System.Net.Http.Functional.Tests/1/console.76909294.log?helixlogtype=result",
- "queueName": "ubuntu.1804.armarch.open",
- "attempt": 1
- },
- ...
- ],
- "occurrenceCount": 547,
- "filesCount": 17
- }
-
-
-## 🔍 Performance findings
-
-We can take a look at how long the program ran for different volumes of logs retrieved and parsed, as well as for different lengths of the search string.
-
-For one given repo and a one word string, we found:
-
-| Start date | End date | # of Occurrences | # of Files with hits | Total time elapsed | Lines scanned/sec | Files scanned/sec | Total files scanned
-| ----------- | ----------- |-------------- | --------------------- | ------------------ | ----------------- | ----------------- | ------------------ |
-| 2022/06/01 | 2022/06/07 | 2528 | 20 | 00:00:31.7819027 | 2532.73 | 0.69 | 22
-| 2022/05/25 | 2022/06/07 | 22768 | 510 | 00:00:35.2533931 | 41198.75 | 14.60 | 515
-| 2022/05/07 | 2022/06/07 | 124973 | 3023 | 00:00:41.9848476 | 193610.51 | 79.95 | 3357
-
-For one given repo and a string with 6 words:
-
-| Start date | End date | # of Occurrences | # of Files with hits | Total time elapsed | Lines scanned/sec | Files scanned/sec | Total files scanned
-| ----------- | ----------- |-------------- | --------------------- | ------------------ | ----------------- | ----------------- | ------------------ |
-| 2022/06/01 | 2022/06/07 | 547 | 17 | 00:00:31.2928707 | 2572.31 | 0.70 | 22
-| 2022/05/25 | 2022/06/07 | 6106 | 124 | 00:00:28.0314279 | 51813.12 | 18.37 | 515
-| 2022/05/07 | 2022/06/07 | 16431 | 310 | 00:00:49.0204510 | 165822.79 | 68.48 | 3357
-
-Note that the time taken can vary based on the repository, date range, network, etc so this is just a snapshot of a couple of times of running the program. But in general the times are pretty consistent and stay under a minute.
-
-## 🤔 Future considerations
-- More input sanitization to limit number of rows returned by Kusto and avoid timeouts, also to avoid injections into the query
-- Error handling for if we try to parse a file that doesn't exist on the server anymore
-- Before implementing functionality to accept a token from the user to access non-public work item logs, consider how to handle when a user enters a private repo
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cpoc-summary.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cpoc-summary.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CHelixTestLogSearch%5Cpoc-summary.md)
-
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/Resources/project-diagram.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/HelixTestLogSearch/Resources/project-diagram.png differ
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ibcmerge.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ibcmerge.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ibcmerge.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ibcmerge.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-# IBCMerge during the CoreFX build
-
-This is an implementation plan to enable IBC training data merging during the CoreFX official build.
-
-The CoreFX official build fetches IBCMerge.exe using the internal tooling flow. See [fetch-internal-tooling.md](fetch-internal-tooling.md).
-
-Packages containing IBC data are restored using a project with an auto-updated dependency. Tentatively in https://github.com/dotnet/corefx/tree/master/external.
-
-If IBC merging is enabled by an msbuild property, the build uses `ibcmerge.exe` to merge IBC data into assemblies where applicable. A BuildTools target performs the merging between the `build` and `sign` sections of the build. The result is that signed official binaries contain merged IBC info.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cibcmerge.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cibcmerge.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cibcmerge.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/improve-arcade-reliability.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/improve-arcade-reliability.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/improve-arcade-reliability.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/improve-arcade-reliability.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-# Improve Arcade Reliability
-
-The goal of this effort is to help us improve Arcade Validation by filling in its test gaps, while mitigating potential build breaks before the latest version of Arcade is pushed out to customers.
-
-## Step 1
-
-We will reassess this step after two weeks to see if we're getting any value out of it.
-
-1. Create a scheduled pipeline similar to Arcade Validation today, with some alterations:
- 1. If Arcade-Validation passes, create a branch of the `runtime` and `roslyn` repos using the last known good build within the last three days. (If there are no last known good builds in the last three days, we will skip building the latest Arcade against that repository.)
- 2. Update those branches with the latest version of Arcade being validated.
- 3. Build those branches and check for errors.
- 1. Should any errors occur, Arcade SMEs should help to determine if the errors are related to Arcade or not.
- 1. If the errors are related to Arcade, authors of commits included in the latest Arcade build since the last Arcade version release must be contacted.
- 2. Authors will be expected to fix whatever in their commits broke the other repos.
- 3. Authors will be expected to add tests/validation to Arcade-Validation so that it will be caught in the future.
- 2. If the errors are unrelated to Arcade we will send the Arcade build to the Latest channel to proceed with the normal dependency flow.
- 4. The pipeline should run 3 times a week, scheduled during non-peak hours.
- 5. Clean up branches of non Arcade-Validation repos used.
- 6. Ensure that we can use existing telemetry to capture any data from the builds. Data from SME triage/investigation can be stored in an Excel spreadsheet (or another low cost document.) Data we want to capture:
- 1. Repo and SHA branched from (since we are using the last known good build in the last three days).
- 2. Result of the investigation:
- 1. Passing (document that we ran Arcade through that branch and it was passing)
- 2. Failure due to product (this will include the scenario if there are no last known good builds in the last three days, thus, we do not build Arcade against that repo)
- 3. Failure due to Arcade/infra failure (this will result in any authors fixing the bug and contributing to the tests in Arcade Validation)
- 4. Failure due to "Impedance Mismatch" (this is when there's too much churn)
-2. After this process is set up, we will turn off automatic dependency flow of Arcade.
-
-## Step 2
-
-1. As test gaps are identified in Arcade Validation, we need to fill in those gaps with tests. This will be an ongoing process as test gaps are identified during the short term solution.
-2. Once we are satisfied with the robustness of Arcade Validation, we can re-enable automatic dependency flow of Arcade to the Latest channel.
-3. Ensure that we have process/policy in place to promote adding validation to Arcade Validation when changes are made to Arcade.
-
-## Known Testing Gaps
-
-| Issue | Description | Root Cause | Resolution |
-| ----- | ----------- | ---------- | ---------- |
-| https://github.com/dotnet/arcade/issues/4660 | Roslyn signed builds failed to publish on 1/20/2020 to the package feeds in Dnceng due to authorization issues after taking an Arcade update. | A refactoring of powershell scripts done in 11/21/2019 made it so the script that enables the authentication for publishing and restore across AzDO account boundaries stopped running. | Made sure the script always runs during AzDO builds in https://github.com/dotnet/arcade/pull/4661 |
-| https://github.com/dotnet/arcade/issues/4759 | Roslyn signed builds failed to queue due to a missing variable group after taking an Arcade update. | We refactored a YAML template so that the set of common variables that are required by post-build validation and publishing were shared across the stage instead of being referenced in each individual job. This caused the Validation stage to try and load a variable group that didn’t exist in DevDiv. This break made it apparent that SDL validation was never set up to work for repos outside of dnceng as there was a variable group missing. | The variable group was created in DevDiv with the required variables and subsequent builds were queued successfully. |
-| https://github.com/dotnet/arcade/issues/4748 | A Roslyn build was published to the “.NET Core SDK 3.1.2xx” channel in the Build Asset Registry, but no packages were actually published, so the Dependency flow PRs opened by this build were all failing to restore the packages. | The branch that produced the build was using a version of Arcade that didn’t have the publishing set up for that channel. | The branch was updated to use the latest arcade in the “.NET 3 Eng” channel, which brought in the correct publishing templates, and we’re working on adding a warning to builds that try to publish to channels and there’s no publishing implementation available in the YAML templates flowed to that branch. |
-| https://github.com/dotnet/arcade/issues/4775 https://github.com/dotnet/arcade/issues/4728 | The darc version that gets installed by default from the darc-init scripts fails during any operation that uses the paged APIs. | The auto rest client generator generated an invalid client that would fail with a 404 for any APIs that are paged, such as update-dependencies, get-builds, or get-asset. | The generator, and the generated client Darc have already been patched, but we require a production deployment so that the darc-init scripts install a fixed version by default. |
-| https://github.com/dotnet/arcade/issues/4860 | | Roslyn Updated the SDK version they use to build their compilers to 3.1, this change flowed to Arcade, and caused every Repo that wasn’t using a 3.1 SDK to break when compiling certain types of code that we don’t build in Arcade itself | |
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cimprove-arcade-reliability.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cimprove-arcade-reliability.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cimprove-arcade-reliability.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/MaestoTask_TestPlan.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/MaestoTask_TestPlan.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/MaestoTask_TestPlan.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/MaestoTask_TestPlan.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,140 +0,0 @@
-**Goals**
-
-Add test coverage for the Maestro.Task project with particular focus on the code that changed as part of the post-build signing changes. Each included method will have test coverage for the golden path(s) as well as interesting variations and missing/invalid input. I plan to start with the golden path and then use a combination of the VS code coverage tool and logic to identify the rest of the test cases for each method.
-
-Note that the specific test cases listed are an initial guess based on reading code that I am new to and are likely to change as my understanding of the code changes.
-
-**Methods to be covered (ones that were changed for post-sign build have a *** and will be the first to get coverage):**
 - [ ] ***PushMetadataAsync (this will get tests later even though it was changed, because most of its logic is calling the other methods listed below, so I'm going to use Code Coverage to guide the test case creation)
- - [ ] GetBuildDefaultChannelAsync
- - [ ] Positive cases, validate that the correct subset of channels is returned
- - [ ] Given all valid data (known good data set from a repo)
- - [ ] Given GitHub source data
- - [ ] Given AzDo source data
- - [ ] Given both GitHub & AzDo source data
- - [ ] Negative cases, validate that the returned data is empty or a meaningful exception is thrown
- - [ ] Given that both GitHub & AzDo are empty
- - [ ] Given empty/null values in other properties that are logged/checked
- - [ ] GetBuildDependenciesAsync
- - [ ] Positive cases, validate that the correct subset of BuildRefs is returned
- - [ ] Given golden path (build is found and it has a set of dependencies that have all relevant values filled in)
- - [ ] Given build that has product dependencies
- - [ ] Given build that has tool dependencies
- - [ ] Given a build with both product & tool dependencies
- - [ ] Given a build with non-required fields empty (should have no impact on the returned list)
- - [ ] Given one dependency with no builds found (GetBuildId returns null & this method logs then continues, return an empty list )
- - [ ] Given a build with no dependencies (return an empty list)
- - [ ] Negative cases, expect a meaningful exception is thrown
- - [ ] Given an empty RepoRoot
- - [ ] Given that the RepoRoot is not defined in the input file
- - [ ] Given an incorrect (invalid) RepoRoot
- - [ ] GetBuildId
- - [ ] Positive cases, validate that the correct buildId is returned and that the correct assets have been added to the assetCache
- - [ ] Golden path (given assets have a matching commit, buildId has a value, and build has assets in it aren’t in the given list)
- - [ ] Given assets are the only assets for build
 - [ ] Negative, expect a meaningful exception is thrown
- - [ ] Given assets don’t have a matching commit so no buildId is found (no exception, return null)
- - [ ] Given assets are missing field values that are expected/used in logic
- - [ ] Given null/empty arguments (if possible from caller)
- - [ ] ***GetBuildManifestsMetadata
- - [ ] Positive cases, validate contents of returned values (buildsManifestMetadata, signingInfo, manifestBuildData)
- - [ ] Given a single manifest
- - [ ] Given multiple manifests
- - [ ] Given no manifests
- - [ ] Negative cases, expect a meaningful exception is thrown
- - [ ] Given a file that isn’t a manifest
- - [ ] Given badly formatted XML
- - [ ] Given an empty manifest file (valid XML formatting but with nothing in it)
- - [ ] Given a set of manifests missing various pieces of expected data (examples below, not a complete matrix)
- - [ ] Manifest without any assets listed
- - [ ] Manifest that has an asset that contains a package with no version set
- - [ ] Manifest that does not contain any Blobs
- - [ ] Manifest with a blob that does not have a version set
- - [ ] Manifest with a blob that does not have any assets
- - [ ] Given two manifests that have different attributes (expect exception thrown in method)
- - [ ] AddAsset
 - [ ] Positive cases, validate that the asset has been added to the asset list
- - [ ] Given golden path with a combo of shipping and non-shipping assets
- - [ ] Given empty string parameters
- - [ ] Negative cases, expect a meaningful exception is thrown
- - [ ] Given null parameters (if allowed by caller)
- - [ ] MergeBuildManifests
- - [ ] Positive cases, validate the contents of the merged manifest BuildData
- - [ ] Given the golden path (two BuildData objects with compatible manifests in GitHub)
- - [ ] Given the golden path with a mirrored repo
- - [ ] Given three compatible BuildDatas
- - [ ] Given compatible BuildDatas with null/empty assets
- - [ ] Given compatible BuildDatas with existent but partially empty/invalid assets
 - [ ] Negative cases, expect a meaningful exception is thrown
- - [ ] Given two incompatible BuildDatas
- - [ ] Given compatible BuildDatas with duplicated assets
- - [ ] ***MergeSigningInfo
- - [ ] Positive cases, validate the content of the merged SigningInformation
- - [ ] Given two SigningInformation objects that are compatible and contain different information
- - [ ] Given two duplicate SigningInformation objects
- - [ ] Given two SigningInformation objects where one is missing some values
- - [ ] Negative cases, expect a meaningful exception
- - [ ] Given two SigningInformation objects that are not compatible (exception thrown by method)
- - [ ] Given null/empty arguments (if possible from caller)
- - [ ] LookingForMatchingGitHubRepository
 - [ ] Positive cases, validate that the BuildData is updated with the correct information
- - [ ] Given a BuildData that is based on GitHub (non-mirrored repo)
- - [ ] Given a BuildData that is based on AzDo (mirrored repo) where GitHub is the current mirror
- - [ ] Given a BuildData that is based on AzDo (mirrored repo) where AzDo is the current mirror
- - [ ] Negative cases, expect a meaningful exception
- - [ ] Given a BuildData with an invalid url for the AzureDevOpsRepository value
- - [ ] Given null/empty arguments (if possible from caller)
- - [ ] ***GetManifestAsAsset
- - [ ] Positive cases, validate the contents of the AssetData and that it has been added to blobSet
- - [ ] Given a list of AssetData with a location string and a manifest file that exists
- - [ ] Given a list of AssetData with a location string and a manifest file that does not exist
- - [ ] Given a golden path where the AssetVersion is set
- - [ ] Given a golden path where the AssetVersion is not set, but there is a non-shipping asset with a version
- - [ ] Given a golden path where the AssetVersion is not set and there is not a non-shipping asset with a version
- - [ ] Given an empty list of AssetData (expect an empty list returned)
- - [ ] Given a null location
- - [ ] Negative cases, expect a meaningful exception
- - [ ] Given other null/empty arguments (if allowed by caller)
- - [ ] Given blobSet that already has a key with the same name as an asset (if possible to get into that state)
- - [ ] ***CreateAndPushMergedManifest
- - [ ] Positive cases, validate content of manifest file
- - [ ] Given a list of compatible assets with SigningInformation & ManifestBuildData within expected values
- - [ ] Given assets that are blobSet
- - [ ] Given assets that are not in blobSet
- - [ ] Given assets where some are in blobSet and some are not
- - [ ] Given assets that are non-shipping
- - [ ] Given assets that are shipping
- - [ ] Given other empty/null arguments (as allowed by caller)
- - [ ] Given a mergedManifestPath that does not exist
- - [ ] Given a mergedManifestPath that already contains a file with the same name
- - [ ] Negative cases, expect a meaningful exception
- - [ ] Given null SigningInformation
- - [ ] Given SigningInfo that has some null/empty values
- - [ ] ***SigningInfoToXml (this is going to be a fine line between testing our logic and testing XDocument, so these test cases are very likely to evolve as I write them)
- - [ ] Positive cases, validate content of the returned XElement
- - [ ] Given a SigningInformation with a single one of each of the types parsed and all values filled in
- - [ ] Given a SigningInformation with multiple of the types parsed, all of which contain the expected values
- - [ ] Negative cases, expect a meaningful exception
- - [ ] Given a SigningInformation missing various top level values that are assumed present (if allowed from caller)
- - [ ] Given a SigningInformation with nested values missing
- - [ ] Scenario Tests
- - [ ] Positive cases, validate that the BAR database has been updated with the correct information and that the new build exists
- - [ ] All values and input are valid throughout the pipeline
- - [ ] Negative cases, a user friendly error message is returned
- - [ ] RepoRoot is null or empty
- - [ ] AssetVersion is null or empty
-
-**Non-Goals**
-Test coverage for any code outside of the Maestro.task project.
-
-**Methods to be excluded:**
-
-- “GetXYZ” methods where it only returns a string from the environment or calls an outside function without applying interesting logic.
-
-**Risk**
-This is fairly low risk, since it’s going to be based on the other existing test projects. There are some changes required in the product code to allow for DI and mocking attachment points, that’s the biggest risk in this set of changes.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CMaestoTask_TestPlan.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CMaestoTask_TestPlan.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CMaestoTask_TestPlan.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/OneLocBuild/one-pager.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/OneLocBuild/one-pager.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/OneLocBuild/one-pager.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/OneLocBuild/one-pager.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,60 +0,0 @@
-# Generating the Localization Index File (`LocProject.json`) for the New Localization System (OneLocBuild)
-
-## Project Summary
-For a variety of reasons, the localization workflow is changing and we need to [migrate to the new loc system](https://github.com/dotnet/arcade/issues/6842).
-This system is essentially an Azure DevOps task([OneLocBuild](https://ceapex.visualstudio.com/CEINTL/_wiki/wikis/CEINTL.wiki/107/Localization-with-OneLocBuild-Task))
-that we run in each repo's build pipeline to gather up our English resource files, send them off to the localization system, and receive
-localized resource files back. Because this is common infrastructure that will need to be implemented across all of our customer repos,
-it makes sense to implement it in Arcade.
-
-More information on migration can be found [here](https://ceapex.visualstudio.com/CEINTL/_wiki/wikis/CEINTL.wiki/1481/Migrating-out-of-SimpleLoc?anchor=ado-pipeline-creation-for-projects-hosted-in-github).
-
-A major component of the new localization system is an index file called `LocProject.json`. In the linked documentation above, this file is
-checked into the repository, which would require every single repo to maintain a list of all of their resource files that need to be localized
-manually and make sure it stays in sync with their changes. This is a non-ideal solution. Instead, we hope to generate the `LocProject.json` file
-at build time prior to running the OneLocBuild task.
-
-## Goals
-The primary goal of the project is to generate the `LocProject.json` file at build time so that we can automate the localization process
-as much as possible with minimal to no intervention from customer repos.
-
-## Stakeholders
-The primary stakeholders for this project are the .NET Core Engineering team (who will maintain this process on
-behalf of our customer repos) and the localization team. The other stakeholders include all of the customer repos who may have
-to maintain some new files for localization on their end if we aren't able to completely automate the work within `eng/common`.
-
-## Risk
-The most significant risk facing this project is any information in the index file that needs to be manually tweaked. As an example,
-[@RussKie](https://github.com/RussKie) created a [first pass attempt](https://github.com/dotnet/arcade/issues/6842#issuecomment-771963490)
-at this and found that he still had to manually remove some .resx files from the `LocProject.json` file after generation. We could
-create something like an exclusions list or other file that is *more* static than the `LocProject.json` would be to take much
-of the burden off of customer repos if we can't find a way to be smart with our file inclusion.
-
-To mitigate this risk, we will work with the localization team to see if we can replicate the logic they were using previously when they were
-automatically scanning our repos and loop in the customer repos to make sure the localization pipelines are working properly for them.
-
-A second risk facing us is the need to backport this to servicing branches of Arcade. At this time it is unknown if there will be significant
-challenges to this separate from the ones we currently face in master. We will work in tandem with [@mmitche](http://github.com/mmitche)
-to make sure that this is mitigated as much as possible.
-
-Finally, the hard deadline of March 31, 2021, which is when the old localization system will be turned off is a risk. While we will likely
-be able to accomplish the majority of the work by this point, the unknowns of the servicing branches, in particular, are worrisome. We will work
-with the localization team to put in place a temporary manual process they recommended if this date slips for any of our branches.
-
-## Serviceability
-Two PATs are required by the OneLocBuild task: a GitHub PAT and an AzDO PAT for the [ceapex organization](https://dev.azure.com/ceapex).
-The latter will have to be created and maintained.
-
-There will be tests for the `LocProject.json` generation script and any other scripts that are created to ensure they are generating files
-correctly.
-
-### Rollout and Deployment
-This project will be tested thoroughly in customer repos before we check it into Arcade. Once we do check it into Arcade, it will simply be rolled out
-as a part of Arcade.
-
-## FR Handoff
-Most likely, we will only need to write a single document on the results of this project to facilitate FR handoff.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5COneLocBuild%5Cone-pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5COneLocBuild%5Cone-pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5COneLocBuild%5Cone-pager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/one-pager-template.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/one-pager-template.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/one-pager-template.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/one-pager-template.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,71 +0,0 @@
-# Epic Template - One-Pagers Guidelines
-
-## Goal and Motivation
-
-The information included within our epics is high-level business objectives and does not always leave much room for practical information. Sometimes the v-team is able to capture all the information outlined below in the epic. If that is the case, there is no need for an additional document.
-
-In most cases, however, v-teams need a place where they can capture additional information that helps them "think about" how they are going to implement a given feature.
-
-The goal of the one-pager is to bring clarity to how the v-team is going to implement and support specific aspects of the business goals defined in the epic.
-
-The document below is meant to be a guideline on what the v-team should be thinking about when defining the feature they are working on. It is up to you what you include in your one pager.
-
-## One-Pager Guidelines
-
-In this section you will find the areas that you should consider including in your one-pager.
-
-### Stakeholders
-
-Who is this work for (i.e. stakeholder and those that should "sign-off" on your POC) and what are the problem(s) they are asking us to solve?
-
-### Proof of Concept (POC)
-
-An effective proof of concept proves the goal of a proposed project is viable, and will be successful. The value of a POC is it can help the v-team identify gaps in processes that might interfere with success.
-
-A POC can help
-- Elicits feedback from everyone involved in a project, including those who might not have otherwise contributed, thereby mitigating unforeseen risk.
-- Creates a test project to evaluate before work begins on an actual project.
-- Verifies that concepts and theories applied to a project will have a real-world application.
-- Helps us to prove our assumptions (for example, if certain functionality, like using a service account to post comments from GitHub to Teams in a service, is possible) before committing to completing the work in a given timeframe.
-- Helps us to adjust our expectations about how much work a feature might take to complete depending on the challenges we run into that we didn't originally consider in our assumptions.
-- Can have more than one POC, if necessary, for a project.
-
-
-### Risk
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-- What are your assumptions?
-- What are your unknowns?
-- What dependencies will this epic/feature(s) have?
- - Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
-- Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
-- Does anything the new feature depend on consume a limited/throttled API resource?
-- Have you estimated what maximum usage is?
-- Are you utilizing any response data that allows intelligent back-off from the service?
-- What is the plan for getting more capacity if the feature both must exist and needs more capacity than available?
-
-### Usage Telemetry
-
-- How are we measuring the “usefulness” to the stakeholders of the business objectives?
-- How are we tracking the usage of this new feature?
-
-## Service-ability of Feature
-
-Changes that we implement often require additional maintenance to support them long term. The FR group has been set up to handle this work but it is up to the v-team to make sure FR is successful in servicing the changes made within your epic long term. Please see the [Servicing Guidelines](https://github.com/dotnet/arcade/blob/main/Documentation/Project-Docs/Servicing%20Guidelines.md) Document for what you should be thinking about during your feature creation to help the team be able to easily service your feature long term.
-
-## House Keeping
-
-In order to align with Epic Content Guidance, one-pagers should be stored in a central location.
-- The folder to store your One-Pager can be found in the [Documentation Folder](https://github.com/dotnet/arcade/tree/main/Documentation/TeamProcess/One-Pagers)
-- The name of the one-pager should contain the name of the epic and the epic issue number (for easy reference).
- - Example: Coordinate migration from "master" to "main" in all dotnet org repos - core-eng10412.md.
-- Use the PR process to document the discussion around the content of the one-pager.
-
-Guidance for Epics can be found at [Guidelines for Epics](https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/552/Guidelines-for-Epics) wiki.
-
-After all discussions have been resolved, the resulting one-pager document should be signed-off (this does not need to be a formal process) by stakeholders (e.g. v-team members, epic owners, et cetera) and then linked to the associated epic's GitHub issue for discover-ability.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cone-pager-template.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cone-pager-template.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cone-pager-template.md)
-
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/OS Onboarding/Images/DevWorkFlow.JPG and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/OS Onboarding/Images/DevWorkFlow.JPG differ
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/OS Onboarding/Requirements.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/OS Onboarding/Requirements.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/OS Onboarding/Requirements.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/OS Onboarding/Requirements.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,78 +0,0 @@
-# Overview
-The requirement for Platform/OS onboarding is that it should be relatively painless to add/update/delete Queues/Scalesets and doable by all customers.
-
-# Requirements
-- All Existing queue/scaleset combos are described in one or more yaml files in a VSTS/Github Repo which looks something like this.
-```
-- &Windows10
- Name: Windows.10
- AzureImage:
- Name: Windows
- Version: 10
- Artifacts:
- - vs_15_08
- - helix_runtime
- MaximumScale: 50
- Owner: abc@
-
-- <<: *Windows10
- Name: Windows.10.Open
- Public: true
- MaximumScale: 20
- Owner: abc@
-
-- Name: Windows.7.Amd64
- Public: true
- MaximumScale: 20
- location: westus
- tags: {
- "QueueId": "Windows.7.Amd64",
- "ResourceGroupName": "Windows.7.Amd64.WestUS",
- "WorkspacePath": "D:\\\\j",
- "IsAvailable": "true",
- "IsInternalOnly": "true",
- "UserList": "all",
- "OperatingSystemGroup": "windows"
- }
- scaling rule:
- Owner: abc@
-
-- Name: Windows.7.Client
- BaseImage: http://dotnet-eng-images.storage.azure.net/base-images/Windows.7.Client/15.6.750.vhd
- Artifacts:
- - vs_15_08
- - helix_runtime
- MaximumScale: 3000
- Owner: abc@
-
-- Name: OSX.1012
- Unmanaged: true
- Owner: abc@
-
-- Name: TOF.External
- Unmonitored: true
- Owner: abc@
-```
-- User makes a PR to the repo to add/edit/delete scalesets in one of the yaml files.
-- Validation Service in the Repo runs sanity checks on like “Does that image exist” and “Are those artifacts known artifacts”, if possible.
-- If Validation succeeds, send a PR to Image Creation Factory with specific parameters (TBD) to create Image/Artifacts.
-- Wait for the image to be created, Image Factory notifies via a webhook with a status ("image complete"/"failed to create an image" etc.)
-- If the image is created, validate that the image works by creating a scaleset and deploying to INT/staging with one machine and test with a sample job. If artifacts are requested, quick validation per artifact to make sure the artifact is operating as expected (e.g. if someone wanted VS, make sure a “msbuild test.proj” does the right stuff, if helix is request, make sure it reads a queued item and processes a job)
-- If anything failed, mark the PR as failed
-- Maintain an image mapping yaml, which contains a mapping against commit# and the Image/Artifact created by the Image Creation Factory.
-- When the User-initiated PR or PR to update image mapping yaml merges, initiate CI build/release that
- - Creates any required queue
- - Transforms every defined scaleset/queue combo into an Azure ARM template and pushes all those, which will update any existing scale set, and create new ones as necessary. This might involve fetching some temporary secrets to initialize things
- - Delete any scale/set queue that is defined but not in this repository anymore (so we can decommission things)
-- To handle updating existing scale sets (some VMs running “newer” images than others), we need to augment the VM cleanup tool to detect when there are “older” VM’s in the scale set that contains newer ones and mark them as “unhealthy” so the get deleted, and newer images take their place
-- Cleanup service runs periodically every n days to clean up outdated VMs/scalesets, update Image Mapping yaml accordingly.
-- Add Helix as an Artifact to ImageFactory
-
-# Dev Work Flow
-![](./Images/DevWorkFlow.JPG?raw=true)
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5COS%20Onboarding%5CRequirements.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5COS%20Onboarding%5CRequirements.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5COS%20Onboarding%5CRequirements.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/RepositoriesInTheirOwnSubscription.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/RepositoriesInTheirOwnSubscription.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/RepositoriesInTheirOwnSubscription.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/RepositoriesInTheirOwnSubscription.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,58 +0,0 @@
-# Repositories working on their own azure subscription
-
-In order to meet our SLA and to limit our azure spending, there are repositories that need to run on their own subscription1.
-
-There are some guidelines to decide when a repository needs to be transferred to their own subscription. This is going to be shared later.
-
-## How do I transfer a repository to its own subscription?
-
-To transfer a repository to their own subscription we will go through the following process:
-
-1. Work with DDFun2 to create a new Azure Subscription. You will need to provide the number of cores that you need, for which [region](https://docs.microsoft.com/en-us/azure/virtual-machines/regions) and which [VM type](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) you need. The process of creating the new subscription takes around 1-2 weeks.
-
-2. Identify the Helix queues this repository needs. A repository usually only uses a subset of the available Helix queues, so it is important to identify the ones that will be duplicated.
-
-3. Create the description of the subscription on subscriptions.yaml:
- - Subscription - Name of the subscription.
- - TeamName - Name that is going to identify the queues for this subscription.
- - Repositories - The repository/repositories you want to migrate.
-
-4. Add the list of queues to the corresponding yaml definition files. The name should be of the form `.`. The **QueueName** is the name of the queue you are duplicating and the **TeamName** is the one defined in the step above for that subscription.
-
-5. The last step is to let the AutoScaler and the Core Rebalancer know about the new subscription. This should be done in [*AutoScaleQuotaConfigProd.json*](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-machines?path=%2Fsrc%2FServiceFabric%2FProcessAutoScaleService%2FConfigs%2FAutoScaleQuotaConfigProd.json&version=GBmaster&_a=contents) where the subscriptionId and the quota for the subscription needs to be added.
-One important thing to keep in mind is that in most cases the cores added to the new subscription account for our overall core consumption in other words the cores for the new subscription needs to be subtracted from another subscription most likely HelixProd.
-
-## How this works
-![](./assets/RedirectJobsWorkflow.png)
-
-1. The user sends the Job to the *Queue* and as part of the job information the user includes the *Repository*.
-
-2. The Helix JobController gets a list of all the queues under that repository and if the ```*Queue*``` exists as part of that list a new queue is assigned for that job, the new queue is going to be ```*Queue.TeamName*```.
-
-If there is not any match the queue doesn't change, which in most cases means that this is going to be processed by HelixProd. This could happen for queues that the repository doesn't use in a significant way, but if this starts happening in queues in which the impact is notable for HelixProd, we should create a specific queue for the repository.
-
-Internally *Queue* and *Queue.TeamName* lives on different subscription so as soon as we send the job to another queue, we are sending that job to another subscription.
-
-It's important to keep in mind that we are maintaining all these new queues and these queues work in the same way as the rest. This means that as soon as a job has been redirected this job is going to wait for a machine on that queue (until the queue scales up), retry work items on that queue if the work item fails and if there is any problem we need to handle that problem on the *Queue.TeamName* queue.
-
-# How many cores should I assign to the new subscription
-
-Our service is authorized to consume a specific number of cores so even after splitting the queues to a new subscription we should stay under that authorized limit. For example, HelixProd can have 6500 cores authorized and we plan to have a new subscription (splitting it from HelixProd) we could end having HelixProd with 4500 and the new subscriptions with 2000, having in total the 6500 cores.
-
-After identifying which are the queues you are going to duplicate you need to know the number of cores that are used by that repository, for that you can do the following steps, for every queue:
-
-1. Get the `total number of cores` consumed by that queue during a timespan.
-2. In the same timespan, identify what `percentage` of the jobs that are sent to that queue belongs to the repositories that you are creating a new subscription for.
-3. Compute the number of cores that the repository is consuming in that queue, based on the `percentage` and the `total number of cores` (`total number of cores` * `percentage`)
-
-Once the total cores needed per queue is calculated, sum up the total cores for the queues in question to get an idea on how many cores is needed for that repository. **Suggestion:** Give 5% more to the total cores that you calculated.
-
-
-#
-###### 1Unit of Azure Billing and organization
-###### 2 Reach out to @ilyas1974
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CRepositoriesInTheirOwnSubscription.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CRepositoriesInTheirOwnSubscription.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CRepositoriesInTheirOwnSubscription.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/SBOM Generation/one-pager.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/SBOM Generation/one-pager.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/SBOM Generation/one-pager.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/SBOM Generation/one-pager.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,44 +0,0 @@
-## Generating the Software Bill of Material (SBOM)
-
-## SBOM
-The Executive Order(EO) and The National Telecommunications & Information Administration (NTIA) report defines an SBOM as a formal record containing the details and supply chain relationships of various components used in building software. On May 12, 2021, the U.S. Presidential EO, section 4(e)(vii) is requiring all software sold to the federal government to provide a Software Bill of Material (SBOM).
-
-SBOM is usually a single file (such as .json) that captures this information about the software from the build. Microsoft has decided to use Software Package Data Exchange (SPDX) as its SBOM format of choice. All software produced from Microsoft will have an SPDX SBOM.
-
-SBOMs provide two core benefits:
-
-i) Software Transparency - this is a small step towards increasing trust as the SBOM describes the "ingredients" of the software and their relationships. This also enables external consumers of SBOMs to do vulnerability lookups on the open source software embedded within.
-ii) File checksums for integrity verification purposes
-
-## Goals
-Primary goal is to generate SBOM for all the software produced by .Net. Here we are focusing on the following areas:
-
-i) Staging pipeline
-ii) Arcade and all the repos that use arcade eg: Runtime, aspnetcore etc.
-iii) Repos that are not on-boarded to arcade eg: arcade-services, OSOB, helix etc.
-
-## Stakeholders
-- .NET Core Engineering
-- .NET Core Engineering Partners
-- Microsoft
-
-## Unknowns
-There are 2 ways to generate SBOM
-1) Azure Task - Helps with generation of SBOM and uploads it to db
-2) Executable - Creates the manifest but uploading is TBD
-
-## Rollout and Deployment
-- Firstly we will be generating SBOM for staging pipeline. Here we already have a place where we upload all the signed assets, so we will need to add a azure task to generate and upload the SBOM. After generating SBOM, we will need to get a one time manual sign off from partners to see if the generated SBOM is valid and contains all the 'expected' items.
-- Then focus on our Engineering systems - In Arcade (main branch) we are planning to use the executable to generate SBOM. Here we will validate if SBOM is generated correctly. In Arcade we will add a feature flag for SBOM generation. We will initially turn on this feature for a few repos and see if SBOM is getting generated correctly, then roll out for all the other repos. This gives repo owners the ability to opt-out of the feature in case of failure, while we investigate.
-- Backport SBOM generation changes to release/6.0 branch.
-- The repos that use arcade may have multiple places where we will have to generate SBOM. We will need to generate SBOM for all the repos that use arcade. There might be multiple SBOMs in this scenario. Then we need to get sign off from the repo owners to validate SBOM.
-- Lastly, we will have to work on repos that are not on-boarded to arcade like arcade-service, helix, OSOB.
-
-## FR handoff
-- Will document SBOM generation in arcade and how repo owners can on-board.
-- Will document any failures as I encounter.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CSBOM%20Generation%5Cone-pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CSBOM%20Generation%5Cone-pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CSBOM%20Generation%5Cone-pager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/SDL/arcade-services-fall-2020-one-pager.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/SDL/arcade-services-fall-2020-one-pager.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/SDL/arcade-services-fall-2020-one-pager.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/SDL/arcade-services-fall-2020-one-pager.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,37 +0,0 @@
-### Stakeholders
-
-Stakeholders are: Chris Bohm as SDL owner but also whole .NET Engineering Services team as this work relates to every part of our codebase.
-
-### Risk
-
-Biggest potential risks are:
-- Huge surface area to cover - Arcade Services contains many different services, tools and libraries. They all have to be assessed separately. This will also require a lot of investigative work especially in less known parts of our infrastructure.
-- Mission Control service - it has not been deployed in a long time and we have to make it compilable and deployable again to fix reported SDL vulnerabilities
-- Risk of big changes to our services for compliance reasons that will either consume a lot of time or require us to do big breaking changes to our services.
-- We have 60 days to fix the found vulnerabilities so there is also a risk of running out of time. Not meeting this deadline could potentially cause some legal/compliance problems for our team.
-
-### Serviceability
-
-This epic will change only those parts that are found to be non-compliant. We don't expect many changes to testing, deployment and serviceability of our components. In cases where it will be needed we will try to document the process inside relevant services. We will also prepare the documentation summarizing the work done and tips for future SDL rounds.
-
-#### Rollout and Deployment
-
-This epic does not introduce any new components so there won't be any major changes to rollout and deployments. The only exceptions are:
-- Mission Control - it hasn't been deployed in a long time so we will need to get the deployment scripts to a working state again and deploy it.
-- Grafana - we need changes to deployment scripts to be able to deploy specific version of Grafana instead of the latest one found in package repository. This is needed so that version of Grafana in Component Governance matches the actual instance.
-
-### Usage Telemetry
-
-There is no telemetry to track in this epic. The only metric is that we need to close all work items in SDL assessments.
-
-### Monitoring
-
-No new monitoring is needed.
-
-### FR Hand off
-
-There is no FR hand off required but we will prepare documentation summarizing what has been done and put it in the dotnet/core-eng wiki.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CSDL%5Carcade-services-fall-2020-one-pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CSDL%5Carcade-services-fall-2020-one-pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CSDL%5Carcade-services-fall-2020-one-pager.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Secret Management/one-pager.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Secret Management/one-pager.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Secret Management/one-pager.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Secret Management/one-pager.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,51 +0,0 @@
-# Secret Management
-We need a secret management system which allows us to audit and monitor secrets, rotate them in an automated fashion (as much as possible), and manage appropriate / inappropriate secret usage. More specific (and additional requirements) are available [below](#requirements).
-
-## Requirements
-Requirements [gist](https://gist.github.com/chcosta/51af24ab8a1cfd303a50d0aa7332e7f0)
-
-## Stakeholders
-- The First Responder team
-- All of .NET Core Engineering
-
-## Risks
-- Another service that we have to manage and monitor
-- Adds more policy around secret usage in our services
-
-### Unknowns
-- Can we monitor all key vault secret accesses? Azure has access logs for this that log requesting IP. Is that enough?
-- There are categories of secrets which don't involve a service accessing key vault for the value (things like account passwords and otp codes), can we monitor those without introducing another layer? If not, is that ok? These should be monitored and managed in a key vault just accessed manually and/or with a tool when a person needs them.
-- Can we use a third party tool for secret management? None of them look promising. https://github.com/microsoft/AuthJanitor has a disclaimer in its readme about it not being ready for prime time, and it requires deploying a website that requires SDL stuff.
-
-### Proof of Concepts
-- https://github.com/microsoft/AuthJanitor exists. We very much don't want a website, but can we use some of this.
-- https://www.vaultproject.io/use-cases/secrets-management looks cool but costs money.
-- Azure has a sample https://github.com/Azure-Samples/serverless-keyvault-secret-rotation-handling but that is just boiler plate that we might be able to take something from.
-
-## Serviceability
-- Tests for rotation of all secrets that can be rotated
-- Management system runs for PRs to validate configuration, and changes are validated in staging before deployment
-- The tool will not accept customer input, so doesn't affect SDL or threat model. All authentication will be handled by azure cli, so arbitrary people can't mess with secrets they don't already have access to.
-
-## Rollout and Deployment
-- We need to deprecate the existing "secret notifier"
-- This will be deployed with the existing services
-
-## Usage Telemetry
-- Usage will be tracked in application insights
-
-## Monitoring
-- Grafana alerts and build results
-
-## FR Hand off
-- Will create documentation about
- - How to use the tool
- - What to do when it fails the build
- - What to do when a secret gets leaked or expires
- - How to diagnose the tool when alerts get triggered
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CSecret%20Management%5Cone-pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CSecret%20Management%5Cone-pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CSecret%20Management%5Cone-pager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/security-builds.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/security-builds.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/security-builds.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/security-builds.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,169 +0,0 @@
-# Security Builds for .NET Core
-
-This document describes security builds of .NET Core.
-
-- [How to setup a security build](#how-to-setup-a-security-build)
-- [How to kickoff a security build](#how-to-kickoff-a-security-build)
-- [How to get values for queue variables](#how-to-get-values-for-queue-variables)
-- [How to access and resolve security issues](#how-to-access-and-resolve-security-issues)
-
-
-## How to setup a security build
-
-Security Development Lifecycle ([SDL](http://sdl/)) specifies the minimum security requirements that must be satisfied before making a Microsoft software or service available to customers. To help product teams fulfill the security requirements, SDL team provides a few tools and services, in addition to detailed guidance and a dedicated support team. Some of these tools and services are available as a VSTS extension called Secure Development Tools ([SDT](https://www.1eswiki.com/wiki/Secure_Development_Tools_VSTS_Extension)), which is a collection of build tasks. These build tasks can be added to a VSTS build definition.
-
-Trust Services Automation ([TSA](http://sql/wiki/Trust_Services_Automation_%28TSA%29)) is a service that analyzes the logs produced for security tools, identifies regressions, creates workitems to track the regressions, and generates a detailed report. One of the tasks in SDT extension is to collect logs from security tools and upload them for processing at TSA. This allows product teams to setup a VSTS build definition that acquire the latest version of security tools, run the tools against the product, gather and analyze logs, detect regressions, and prepare reports.
-
-Security build for .NET Core is a VSTS build definition that uses SDT extension. A security build does not involve building the product from source. This build operates on build artifacts of an official build. The approach for security build can be summarized as follows.
-
- 1. Download the packages, using `sync` command, for the specified official build Id
- 2. Extract assemblies and symbols from the packages
- 3. Run security tasks that scan assemblies. Use APIScan and BinSkim tasks from SDT extension.
- 4. Get the sources at the SHA specified in `version.txt`, which is obtained when packages are extracted at step #2
- 5. Run security tasks that scan source code. Use CredScan and PoliCheck tasks from SDT extension.
- 6. Gather logs and upload to TSA. Use publish task in SDT extension.
-
-SDT extension currently support 4 tools that are applicable to .NET Core. A short description of each tool is shown in the table below.
-
-|Tool|Description|
-|:---|:----------|
-| BinSkim | Validates compiler/linker settings and other security-relevant binary characteristics.|
-| APIScan | Determines whether or not the software complies with the API Usage Standard of the Interoperability Policy.|
-| CredScan | Index and scan for credentials or other sensitive content.|
-| PoliCheck | Scan code, code comments, and content for words that may be sensitive for legal, cultural, or geopolitical reasons.|
-
-
-.NET Core security build definitions and link to the report is listed in the table below.
-
-
-|Build Definition|TSA Report|
-|:---------------|:---------|
-| [CoreFx](https://devdiv.visualstudio.com/DevDiv/_build/index?context=allDefinitions&path=%5CDotNet%5CSecurity&definitionId=6552&_a=completed) | [CoreFx-master](http://aztsa/api/Result/CodeBase/DotNet-CoreFx-Trusted_master/Summary) |
-| [CoreCLR](https://devdiv.visualstudio.com/DevDiv/_build/index?context=allDefinitions&path=%5CDotNet%5CSecurity&definitionId=6598&_a=completed) | [CoreCLR-master](http://aztsa/api/Result/CodeBase/DotNet-CoreCLR-Trusted_master/Summary) |
-| [Core-Setup](https://devdiv.visualstudio.com/DevDiv/_build/index?context=allDefinitions&path=%5CDotNet%5CSecurity&definitionId=6658&_a=completed) | [Core-Setup-master](http://aztsa/api/Result/CodeBase/DotNet-Core-Setup-Trusted_master/Summary) |
-| [CLI](https://devdiv.visualstudio.com/DevDiv/_build/index?context=allDefinitions&path=%5CDotNet%5CSecurity&definitionId=6698&_a=completed) | [CLI-master](http://aztsa/api/Result/CodeBase/DotNet-CLI-Trusted_master/Summary) |
-
-In the current setup, a security build is triggered manually. Official Id and corresponding Azure container name need to be provided at the time of queuing the build. In the near future, Maestro will be extended to determine the Official Id and container name, and trigger a security build automatically.
-
-TSA is configured to send an email report for each scan or security build to [dncsec](dncsec@microsoft.com) that include .NET Core repository owners responsible for security issues. Repository owners should focus on new issues and regressions highlighted in the report, and take necessary action to resolve those issues.
-
-## How to kickoff a security build
-
-Kickoff of a security build is as simple as queuing a VSTS build definition. While queuing, values for four input variables need to be provided. These variables are as follows:
-
- - *PB_BuildNumber* - official build Id of the repository.
- - *PB_CloudDropContainer* - name of the Azure container from where the packages published from the official build (*PB_BuildNumber*) can be downloaded.
- - *CodeBase* - TSA codebase that corresponds to the branch. For example, `master` or `2.0.0`.
- - *NotificationAlias* - A comma separated email Ids where the TSA report should be sent.
-
-For example, a recent build Id of CoreCLR `2.0.0` branch is `20170621-01`. Packages produced from this build were published to Azure container named `coreclr-preview3-20170621-01` . To launch a security build that will scan the assemblies and source code from that official build, perform the following steps:
-
- 1. Navigate to CoreCLR security build [definition](https://devdiv.visualstudio.com/DevDiv/_build/index?context=allDefinitions&path=%5CDotNet%5CSecurity&definitionId=6598&_a=completed)
- 2. Click "Queue new build"
- 3. Enter the variable values:
- - *PB_BuildNumber* = `20170621-01`
- - *PB_CloudDropContainer* = `coreclr-preview3-20170621-01`
- - *CodeBase* = `2.0.0`
- - *NotificationAlias* = `dncsec@microsoft.com,joc@microsoft.com`
-
- Refer to the screenshot below. See [how to get values for queue variables](#how-to-get-values-for-queue-variables)
- 4. Click OK to start the build
-
-----------
-![QueueSecurityBuild.](./assets/QueueSecurityBuild.png?raw=true)
-
-----------
-
-#### Core-Setup
-
-Core-Setup requires an additional queue variable called `PB_BlobName`, which is the name of the Azure Storage blob that contains the packages produced from the official build under test. This blob is under the default container named `dotnet`.
-
-----------
-![QueueCoreSetup.](./assets/QueueCoreSetup.png?raw=true)
-
-----------
-
-#### CLI
-
-CLI builds are fully automated. This means no variable needs to be set at the time of queuing.
-A build is triggered every day around midnight. The build downloads the latest packages (zip) from Azure Storage corresponding to the branch. For example, latest packages of `master` branch are downloaded from (https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master). SHA and build number are read from `latest.version` file (https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/latest.version).
-
-
-As described in the earlier section, when the build finishes successfully, an email report of the security build is sent to listed email Ids. The same report can be viewed online. For example, report for CoreCLR `2.0.0` will be at TSA [website](http://aztsa/api/Result/CodeBase/DotNet-CoreCLR-Trusted_2.0.0/Summary)
-
-
-### How to get values for queue variables
-
-Team dashboard [MC](https://mc.dot.net) is the place to begin when looking for details about .NET Core builds. In the dashboard, navigate to .NET Core release branch such as [2.0.0](https://mc.dot.net/#/product/netcore/200) to get the summary of most recent builds. Described below is how to get the values for queue variables for each .NET Core repository's security build.
-
-*PB_BuildNumber* is the official build number, and is a required variable in security build of all four repositories. To determine this build number for a repository, navigate to the dashboard, identify the most recent build under the corresponding repository. Build number is usually in a year-month-day format. For example, `20170622.01`. Replace the dot with a hyphen. In this example, *PB_BuildNumber* is `20170622-01`.
-
-*PB_CloudDropContainer* is the name of the container where the packages produced from *PB_BuildNumber* build are stored. To get this container name, in the dashboard, click on the build number link or button. In the details, click the URL against `buildUri` to navigate to VSTS build. Navigate to the log for `PipeBuild.exe` task in this VSTS build, and locate the container name as described below.
-
-
-#### CoreFx
-
-In case of CoreFx, container name is the value against `PB_Label`. Shown below is a portion of `PipeBuild.exe` task log showing the container name.
-
->OfficialBuildId=20170622-01 PB_SignType=real PB_Label=**corefx-preview1-20170622-01** SourceVersion...
-
-
-#### CoreCLR
-
-In case of CoreCLR, container name is `Label`. Shown below is a portion of `PipeBuild.exe` task log showing the container name.
-
->OfficialBuildId=20170622-01 SignType=real Label=**coreclr-preview3-20170622-01** SourceVersion...
-
-#### Core-Setup
-
-In Core-Setup, the default container name (*PB_CloudDropContainer*) is `dotnet`. An additional variable named `PB_BlobName` is required for security build of Core-Setup. To locate this value, open `PipeBuild.exe` task log, search for the build leg named `Core-Setup-Publish`, and click the URL against this to navigate to the build leg. Example fragment from the log is shown below.
-
- >Core-Setup-Publish - https://devdiv.visualstudio.com/DefaultCollection/DevDiv/_build?_a=summary&buildId=820812...
-
-In the build leg, locate text similar to the fragment shown below.
-
->Downloading **Runtime/2.0.0-preview3-25422-01**...
-
-
-`Runtime/2.0.0-preview3-25422-01` is the value for `PB_BlobName`.
-
-----------
-
-
-## How to access and resolve security issues
-
-For each successful build, TSA analyzes the logs from the build, and create issues, which are VSTS workitems. Query to the workitems will be in the email report sent to `dncsec`. As mentioned earlier, the report can be accessed at TSA reports, whose URL is in the format - `http://aztsa/api/Result/CodeBase//Summary`. For example, CoreFx master is at `http://aztsa/api/Result/CodeBase/DotNet-CoreFx-Trusted_master/Summary`
-
-Repository owner is responsible to triage, and drive towards resolving all security issues logged against the codebase. There are certain cases where an issue cannot be fixed, a few of them are summarized below.
-
-#### Case #1: External
-
-Say an issue is with an assembly that is not owned or built by the repository, then resolve the issue by setting the following attribute-value in the workitem.
-
-|Attribute|Value|
-|:--------|:----|
-| State|Done |
-| Reason | Work Finished |
-| Status | Resolved |
-| Resolution | Will not Fix |
-
-TSA will stop reporting such issues in future builds.
-
-#### Case #2: Configuration
-
-Say there was a configuration error while launching the build. For example, the branch name was set to `2.0.0` instead of `master`. This will pollute TSA codebase with issues from other branch. So, to cleanup the codebase, resolve such configuration issues by setting the following attribute-value in the workitem.
-
-|Attribute|Value|
-|:--------|:----|
-| State|Done |
-| Reason | Work Finished |
-| Status | Resolved |
-| Resolution | Configuration/Environment |
-
-
-For any questions about security builds, please contact [dncsec](dncsec@microsoft.com).
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Csecurity-builds.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Csecurity-builds.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Csecurity-builds.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Servicing Guidelines.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Servicing Guidelines.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Servicing Guidelines.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Servicing Guidelines.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,35 +0,0 @@
-# Placeholder for Servicing Guidelines
-
-From One-Pager Guidelines
-
-### Serviceability
-
-- How will the components that make up this epic be tested?
-- How will we have confidence in the deployments/shipping of the components of this epic?
-- Identifying secrets (e.g. PATs, certificates, et cetera) that will be used (new ones to be created; existing ones to be used).
- - Instructions for rotating secret (if the secret is new)
-- Does this change any existing SDL threat or data privacy models? (models can be found in [sharepoint](https://microsoft.sharepoint.com/teams/netfx/engineering/Shared%20Documents/Forms/AllItems.aspx?FolderCTID=0x01200053A84D1D9752264EB84A423D43EE2F05&viewid=6e9ff2b3%2D49b8%2D468b%2Db0d3%2Db1652e0bbdd3&id=%2Fteams%2Fnetfx%2Fengineering%2FShared%20Documents%2FSecurity%20Docs) folder)
-- Does this require a new SDL threat or data privacy models?
-- Steps for setting up repro/test/dev environments?
-
-#### Rollout and Deployment
-- How will we roll this out safely into production?
- - Are we deprecating something else?
-- How often and with what means we will deploy this?
-- What needs to be deployed and where?
-- What are the risks when doing it?
-- What are the dependencies when rolling out?
-
-### Monitoring
-- Is there existing monitoring that will be used by this epic?
-- If new monitoring is needed, it should be defined and alerting thresholds should be set up.
-
-### FR Hand off
-- What documentation/information needs to be provided to FR so the team as a whole is successful in maintaining these changes?
-- If you have created new monitoring rules - what tools/processes should FR use to troubleshoot alerts
-- If existing monitoring is used, do the parameters need to be updated to accommodate these new updates
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CServicing%20Guidelines.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CServicing%20Guidelines.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CServicing%20Guidelines.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/1ESManagedPoolsDesign.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/1ESManagedPoolsDesign.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/1ESManagedPoolsDesign.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/1ESManagedPoolsDesign.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,156 +0,0 @@
-## Motivation
-
-Due to corporate policy we are required to migrate our buildpool queues to 1ES Hosted pools. The deadlines include: new pools won’t be created after June 1st. Self-hosted pools will stop working after Sep 30th.
-
-Reference documentation for 1ES pools:
-- [1ES hosted AzureDevOps Agents/Guidance](https://www.1eswiki.com//wiki/1ES_hosted_AzureDevOps_Agents%2fGuidance)
-- [1ES hosted AzureDevOps Agents](https://www.1eswiki.com/wiki/1ES_hosted_AzureDevOps_Agents)
-- [CloudTest Onboarding Guide](https://1esdocs.azurewebsites.net/test/CloudTest/How-Tos/Create-Update-Pool.html)
-
-High level migration plan looks like following:
-- Create new definitions for each of our existing buildpools that will exist in parallel with the Helix queues
-- Migrate customer Yamls to new 1ES based pools
-- Delete old buildpool queues from Helix
-- Clean up definitions for buildpools (i.e. remove all Helix-specific artifacts)
-- Decommission all instances of pool provider and all related resources (key vaults, CI pipelines, release pipelines)
-
-We need to extend OSOB to create two new types of Azure resources:
-- `Microsoft.CloudTest/image` resource for each image that we want to enable on the build pool. This resource contains reference to our SharedImageGallery image version.
-- `Microsoft.CloudTest/hostedpool` resource for each pool. The pools will reference the CloudTest images mentioned above.
-
-## New AzDo pool distribution
-
-Right now, there are two pools (Prod and staging) for our internal project and two for external. We will keep one pool for staging per project and split prod pools into 3 different ones: XAML, servicing and R&D.
-
-
-| AzDo Project | Enviroment | Current pool | New pools | Subscription |
-| ------------ | ---------- | ------------ | --------- | ------------ |
-| Internal | Staging | NetCoreInternal-Int-Pool | NetCore1ES-Internal-Int-Pool | HelixStaging |
-| Internal | Prod | NetCoreInternal-Pool | NetCore1ES-Internal-Pool | HelixProd |
-| Internal | Prod | NetCoreInternal-Pool | NetCore1ES-Xaml-Internal-Pool | DEP-UXP-WinUI-Helix |
-| Internal | Prod | NetCoreInternal-Pool | NetCore1ES-Svc-Internal-Pool | dncenghelix-02 |
-| Public | Staging | NetCorePublic-Int-Pool | NetCore1ES-Public-Int-Pool | HelixStaging |
-| Public | Prod | NetCorePublic-Pool | NetCore1ES-Public-Pool | HelixProd |
-| Public | Prod | NetCorePublic-Pool | NetCore1ES-Xaml-Public-Pool | DEP-UXP-WinUI-Helix |
-| Public | Prod | NetCorePublic-Pool | NetCore1ES-Svc-Public-Pool | dncenghelix-02 |
-
-## 1ES Managed images
-
-1ES Managed images are stored in the resource group [1ESManagedImages](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/84a65c9a-787d-45da-b10a-3a1cefce8060/resourcegroups/1ESManagedImages/overview) in **dnceng-internaltooling** subscription
-
-Each Managed image points to an Azure image in the Shared Gallery ([HelixImages](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/84a65c9a-787d-45da-b10a-3a1cefce8060/resourceGroups/HelixImages/providers/Microsoft.Compute/galleries/HelixImages/overview)) where we store the images we build in **CreateCustomImage.exe** this includes Prod and Staging images.
-
-1ES Managed images used in prod are tagged the same way Azure images are, the tag is **IsProductionImage** and its value is **true**. The tagging happens during the deployment of the pools in **DeployHostedPools.exe**. We will keep the latest 3 prod images the same way we do for Azure images, the clean-up will happen in CleanPRs.exe
-
-## Customer impact
-
-All customers must change their yaml files to start using 1ES Hosted pools. Proper documentation will be shared through our Partners DL.
-
-The old syntax:
-```yaml
-pool:
- name: NetCoreInternal-Pool
- queue: BuildPool.Server.Amd64.VS2017
-```
-
-Will be replaced by:
-```yaml
-pool:
- name: NetCore1ES-Internal-Int-Pool
- demands: ImageOverride -equals BuildPool.Server.Amd64.VS2017
-```
-
-## OSOB changes
-
-- We will need to create new definitions in the YAML inheriting from existing `BuildPool.` queues. These new definitions will have the same properties as existing ones but we will have to change the names because we can't have two definitions with the same name. We can do it for example by changing prefix (e.g. `Build.Windows.10.Amd64` instead of `BuildPool.Windows.10.Amd64`). Keeping the old names would also be technically possible but it would require more changes in the OSOB deployment steps.
-
-- We will use the `Purpose` property to mark definitions used for 1ES hosted pool images. `DeployQueues` will skip this queue but it will be processed in the new `DeployManagedPools` step instead. It shouldn't require any changes to CreateCustomImages.
-
-- A new file called hostedpools.yaml will be created under the definition-base folder. It will contain pools' metadata that later will be used during their deployment. The file will look like this:
-
-```yaml
-HostedPools:
-- Name: NetCore1ES-Internal-Pool
- Subscription: HelixProd
- VMSku: Standard_Dav4
- Region: westus2
- Size: 100
-- Name: NetCore1ES-Xaml-Internal-Pool
- Subscription: DEP-UXP-WinUI-Helix
- VMSku: Standard_Dav4
- Region: westus2
- Size: 100
-```
-
-## 1ES hosted pool ARM template
-
-We will need to add a new step to OSOB build pipeline that will generate and deploy ARM template that will provision 1ES hosted pool. We could extend `DeployQueues` but it will probably be better idea to create new tool `DeployHostedPools` that can be run in parallel with `DeployQueues`.
-
-Example ARM template for 1ES hosted pool looks like following:
-
-```json
-{
- "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
- "contentVersion": "1.0.0.0",
- "resources": [
- {
- "name": "BuildPool.Windows.10.Amd64.Open",
- "type": "Microsoft.CloudTest/images",
- "apiVersion": "2020-05-07",
- "location": "westus2",
- "properties": {
- "imageType": "Gallery",
- "resourceId": "/HelixImages/BuildPool.Windows.10.Amd64.Open/2021.0423.210713"
- }
- },
- {
- "name": "BuildPool.Ubuntu.1804.Amd64.Open",
- "type": "Microsoft.CloudTest/images",
- "apiVersion": "2020-05-07",
- "location": "westus2",
- "properties": {
- "imageType": "Gallery",
- "resourceId": "/HelixImages/BuildPool.Ubuntu.1804.Amd64/2021.0503.000052"
- }
- },
- // ...
- {
- "name": "NetCorePublic-1ESPool",
- "type": "Microsoft.CloudTest/hostedpools",
- "dependsOn": [
- "[resourceId('Microsoft.CloudTest/images', 'BuildPool.Windows.10.Amd64.Open')]"
- "[resourceId('Microsoft.CloudTest/images', 'BuildPool.Ubuntu.1804.Amd64.Open')]"
- // ...
- ],
- "apiVersion": "2020-05-07",
- "location": "westus2",
- "properties": {
- "organization": "https://dev.azure.com/dnceng",
- "sku": {
- "name": "Standard_Dav4",
- "tier": "Standard"
- },
- "images": [
- {
- "imageName": "BuildPool.Windows.10.Amd64.Open",
- "poolBufferPercentage": "*"
- },
- {
- "imageName": "BuildPool.Ubuntu.1804.Amd64.Open",
- "poolBufferPercentage": "*"
- }
- // ...
- ],
- "maxPoolSize": "100",
- "agentProfile": { "type": "Stateless" }
- }
- }
- ]
-}
-```
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5C1ESManagedPoolsDesign.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5C1ESManagedPoolsDesign.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5C1ESManagedPoolsDesign.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/DesignDocs.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/DesignDocs.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/DesignDocs.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/DesignDocs.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,81 +0,0 @@
-# Design doc for executing servicing Helix jobs in a COGS Azure subscription
-
-Epic link: https://github.com/dotnet/core-eng/issues/11639
-
-## Motivation
-
-In order to reduce costs of running our `HelixProd` Azure subscription we want to redirect all servicing builds to `dncenghelix-02` subscription. The difference between those subscriptions is that `HelixProd` is assigned to R&D budget, `dncenghelix-02` on the other hand is assigned to COGS budget (Cost Of Goods Sold). The latter one should be used for all released software but currently we are using R&D budget for everything. Moving servicing builds to COGS-based subscription will allow us to optimize the usage of our current R&D bugdet.
-
-More information about how we use different subscription types can be found at the following links:
-- [Guidance for Production and Non Production](https://dev.azure.com/devdiv/Engineering/_wiki/wikis/CNEKB/7968/Guidance-for-Production-and-Non-Production)
-- [Categories of R&D Subscriptions in DevDiv](https://dev.azure.com/devdiv/Engineering/_wiki/wikis/CNEKB/10037/Categories-of-R-D-Subscriptions-in-DevDiv)
-
-All 2.1, 3.1 and 5.x builds and tests should be redirected. Any servicing branches that are created in the future will also have to be onboarded into this mechanism which must be taken into account in this epic.
-
-Parts of the changes in this epic will leverage the work we already did to enable team/repository based redirection for `runtime` and `xaml` subscriptions. The documentation on this can be found here: [Documentation/Project-Docs/RepositoriesInTheirOwnSubscription.md](https://github.com/dotnet/arcade/blob/main/Documentation/Project-Docs/RepositoriesInTheirOwnSubscription.md).
-
-Following diagram depicts high level view of the components that will be involved in the implementation and the general logic of redirecting jobs to queues located in separate subscriptions:
-
-![Design Diagram](ServicingBuildsRedirectDesign.svg)
-
-## Development work overview
-
-The changes have to be done in four areas: Helix SDK, pool provider, OSOB and Helix API. They are mostly independent of each other so can be done in any order but it makes the most sense to do changes in OSOB before we update Helix API.
-
-The needed changes are:
-
-### arcade-pool-provider
-
-Repository: https://github.com/dotnet/arcade-pool-provider
-
-Pool Provider has to send additional information in Helix API SendJob request - in particular the name of branch that is being built or the target branch for the PR. Based on this data Helix API will be able to differentiate servicing builds from normal ones. Luckily when AzDO calls our pool provider to acquire agent it gives us `getAssociatedJobUrl` property as part of the request and we can call this endpoint to get more detailed information about the job. We also receive `authenticationToken` from AzDO so there's no need to add additional secrets or configuration on our side.
-
-The process will look like this:
-1. AzDO calls our pool provider to acquire new agent
-2. We check if the base queue specified in request exists, if not we return error to AzDO. This check already exists so there are no changes needed here.
-3. We add new HTTP request to `getAssociatedJobUrl` to get the data about the job
-4. The response contains list of all build variables. We need to extract following ones:
- - `build.reason` to distinguish between PR and non-PR builds
- - for PR: at least `system.pullRequest.targetBranch` but it may be also useful to include `system.pullRequest.sourceBranch` and `system.pullRequest.sourceRepositoryUri` at the same time as they may be useful for debugging.
- - for non-PR: at least `build.sourceBranch` and optionally also `build.repository.name`.
-5. We store the extracted variables as properties on job creation request sent to Helix API
-6. If `getAssociatedJobUrl` fails we send job anyway without additional properties to not block it's execution. It will be then sent to standard subscription by Helix API.
-
-When adding this functionality we have to pay attention to performance because:
-1. We have hard limit of 30s to respond to each acquire agent call
-2. AzDO sends acquire agent requests synchronously so any slowdown in processing of this call will affect other jobs irregardless if they are servicing or not
-
-For this reason we will need to prepare monitoring for pool provider acquire agent calls in Grafana. If the slowdown will be significant then we need to change pool provider so that it returns response earlier and then calls `getAssociatedJobUrl` and Helix API asynchronously in the background.
-
-### dotnet-helix-machines
-
-Repository: https://dev.azure.com/dnceng/internal/_git/dotnet-helix-machines
-
-We need to add new servicing queues in OSOB. We will duplicate the minimum set of queues that is currently needed for any servicing builds and instantiate the copies of them in `dncenghelix-02` subscription. New queues will inherit all settings from original queues in `HelixProd` subscription with the exception of new suffix `.svc`. For example there will exist queue called `windows.10.amd64.svc` and the related scaleset `windows.10.amd64.svc-a-scaleset` placed in `dncenghelix-02` subscription. This new queue and scaleset will have the same configuration as original `windows.10.amd64` queue and scaleset. This will be done in similar way as for `runtime` jobs redirection. An example can be found here: [definitions/shared/windows.yaml](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-machines?path=%2Fdefinitions%2Fshared%2Fwindows.yaml&version=GC72af1ddb6e9ff7c7374512ccad6f78e93778066c&line=1754&lineEnd=1756&lineStartColumn=1&lineEndColumn=33&lineStyle=plain&_a=contents). As we can see in the example the servicing queue will inherit all future changes to the base queue. There will be technical possibility to modify servicing queue directly so that it diverges from the base queue that it was based on but to ease maintenance we will probably try to avoid this situation whenever possible. This will be the same approach we have chosen for `xaml` and `runtime` subscriptions (https://github.com/dotnet/core-eng/issues/10630#issuecomment-707224671).
-
-By querying Kusto we can find around 57 queues (out of total 108 non-onprem queues) that were used by servicing branch in last 200 days. Copying only those queues that are actually used for servicing lowers the overall number of resources we have to manage but on the other hand it means that we will have to create new queues after any new servicing branches are created in the future. We will need to prepare the documentation on how to do it and put it into some post-release checklist if we have one. We will also have monitoring in place that will catch any cases in which we missed some queues.
-
-### dotnet-helix-service
-
-Repository: https://dev.azure.com/dnceng/internal/_git/dotnet-helix-service
-
-We need to extend the logic in Helix API to check additional job properties specified in request - in particular the branch name. If the job is related to servicing branch (i.e. `release/X.X`) or PR targeted at servicing branch then the `.svc` suffix will be appended to the queue name specified in original request and job will be redirected to this new queue (e.g. from `windows.10.amd64` to `windows.10.amd64.svc`).
-
-If the servicing queue does not exist for any reason then the job will be sent to standard queue to not block it's execution but we will log an error in AppInsights and have Grafana alert that will notify us about this situation.
-
-The changes have to be made in `ValidateAndNormalizeRequest` controller method: [JobController.cs#L104](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-service?path=%2Fsrc%2FServiceFabric%2FHelix%2FHelixAPI%2FApi%2Fv2019_06_17%2FControllers%2FJobController.cs&version=GBmaster&line=104&lineEnd=105&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents)
-
-This mechanism will be done in similar way as currently implemented team-based redirection (`runtime`/`xaml`) - the difference will be that we will check source branch instead of source repository when redirecting jobs. This servicing branch check will take priority over the team check.
-
-### arcade (Helix SDK)
-
-Repository: https://github.com/dotnet/arcade
-
-Currently we miss the name of PR target branch for test jobs so wee need to extend the `SendHelixJob` task to include that property. We already copy some variables here: [SendHelixJob.cs#L219](https://github.com/dotnet/arcade/blob/d244d21e54bd1778ae68b3ecf676e3c95fffac2e/src/Microsoft.DotNet.Helix/Sdk/SendHelixJob.cs#L219) so it should just require to add `system.pullRequest.targetBranch` to that list. After that we will also need to update the version of Helix SDK package in existing servicing branches.
-
-For 2.1 servicing branches it may be necessary to manually update the files in each branch because they don't use the same mechanisms as newer 3.1/5.0 Helix SDK-based builds. There are using groovy scripts and `upload-tests.proj` files (and maybe even other mechanisms) to schedule tests in Helix.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5CDesignDocs.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5CDesignDocs.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5CDesignDocs.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/One-Pager.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/One-Pager.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/One-Pager.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/One-Pager.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,46 +0,0 @@
-# Execute servicing Helix jobs in a COGS Azure subscription
-## Stakeholders
-The main stakeholder of the project is Chris Bohm as the .NET Azure Champion
-
-## Risk
-More risk comes from the lack of experience the team working on Helix API projects, there will be a ramp-up on how development and debugging works in these projects.
-
-No proof of concept will be needed as we already redirect jobs based on the repository that sends them. We will expand this adding to build and now redirect jobs based on the target branch.
-
-We have all the dependencies in place to start working on redirecting work.
-
-This will not require any change from our customers, servicing jobs will be redirected once a servicing branch exists for them.
-
-Completing this work will open the possibility to increase the load in our vNext Helix queues if needed so the longer it takes to complete the project the higher the risk of not having enough R&D budget for future workload.
-
-## Serviceability
-Unit tests will be added to ensure servicing builds and tests jobs are identified by our services and then redirected appropriately.
-Daily validation will be added to our staging environment where the code paths added are executed and alerts will be triggered if unexpected behavior is identified.
-
-No new secrets will be added as part of this effort
-
-This project has no SDL implications.
-
-This epic will not modify the current steps for setting up repro/test/dev environments for Helix API. Any missing information in the current documentation will be added
-## Rollout and Deployment
-The epic doesn't consider any breaking changes on how helix services are deployed to production but feature flags will be added during the development to reduce the need of rollback in case of unexpected results in production.
-The epic isn't deprecating any service.
-
-The impacted services will follow the current deployment schedule which considers one deployment to production every Wednesday.
-
-New queues will need to be created when the servicing OS matrix changes after a release.
-Changes for redirecting work will be added to Helix API during the development of the project but won't be required once the epic is completed.
-
-The risk of running production deployments for these services is low as they are mature services and have been executing successful builds for a long time. In case of having bugs in the payload that prevents the work from being redirected, the customers most likely won't be impacted as all the work will executed in the queue originally used in the job.
-
-## Usage Telemetry
-We will add a new property to servicing jobs to mark them so they can query in Kusto for monitoring and alerting.
-
-## Monitoring
-S360 report will show us how our R&D bill goes down while the COGS bill increases.
-New Grafana charts will be created to show the subscription where the servicing runs are being executed and an alert, that gets triggered when a servicing job is executed in a R&D queue, will be added.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5COne-Pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5COne-Pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CServicingJobRedirection%5COne-Pager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/ServicingBuildsRedirectDesign.svg dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/ServicingBuildsRedirectDesign.svg
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/ServicingBuildsRedirectDesign.svg 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/ServicingJobRedirection/ServicingBuildsRedirectDesign.svg 1970-01-01 00:00:00.000000000 +0000
@@ -1,1696 +0,0 @@
-
-
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/source-build-orchestration.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/source-build-orchestration.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/source-build-orchestration.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/source-build-orchestration.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,96 +0,0 @@
-# Source Build Orchestration
-
-## Overview
-
-.NET Core contains two logical products: Shared Framework (which is a runtime +
-a set of managed libraries) and an SDK (which is managed code running on top of
-the shared framework to provide commands like: `dotnet run`, `dotnet restore`,
-`dotnet build`, etc.
-
-Each of these products is made up from multiple git projects which operate
-independently from one another. We use NuGet packages and zip/tar.gz files to
-hand off artifacts across project boundaries.
-
-At a high level, composing these projects into products is done by computing the
-dependency relationship between all the projects then building from the bottom
-of the dependency tree upwards. As we build, we use the locally built artifacts
-(nupkgs and zip/tar.gz files) instead of versions we would obtain from NuGet, MyGet or
-Azure Blob Storage to satisfy the needs of projects which depend on a specific
-component.
-
-In this way, we may elide building certain projects when building a product (for
-example, a CoreCLR developer may want to use a source built version of CoreCLR,
-but use the existing set of packages for the rest of .NET Core).
-
-## Bootstrapping
-
-We require an existing .NET Core in order to build .NET Core. This is due to the
-dependencies on `dotnet restore` (to consume NuGet packages) as well as
-dependencies on MSBuild and Roslyn for compiling the managed code in the
-product. We already have a set of scripts which can be used to bootstrap an
-existing version of the .NET Core SDK on a new Linux distribution (by rebuilding
-the native code from source) which we use when bringing new distributions
-online.
-
-Some projects carry their own copies of CoreCLR (instead of obtaining a copy of
-the .NET Core SDK and then running on top of that) which is a practice we should
-stop. Long term, we need to get to a world where all projects uses a single
-shared version of the Shared Framework and SDK when they need to invoke managed
-code as part of their build. Ideally, this will always be the last released
-version of the .NET Core SDK from the LTS train (e.g. when building 1.0.X and
-1.1.X we rely on a pre-existing .NET Core 1.0.0 SDK) so that a source built
-version of the previous product can be used without having to do any
-bootstrapping.
-
-At the start of a source build, if an existing toolchain is not present, we'll
-use the bootstrapping script (Rover) to get a working toolchain and then use
-that to start building.
-
-## Expressing dependencies with NuGet packages
-
-We'll continue to use NuGet packages as a way of handing off artifacts between
-projects. Note that NuGet packages should **only** be used for cross project
-dependencies. When depending on a component built out of the same git
-repository, project references should be used instead. The rationale for this
-rule is that without it we can't build all of a repositories dependencies from
-source, since it depends on a previous version of itself.
-
-We use NuGet packages instead of some other format (e.g. building to a shared
-directory with a well known convention for artifacts) because it provides the
-most flexibility across projects. For better or worse we understand how to use
-NuGet across repositories to manage dependencies.
-
-It is possible that the NuGet packages that we consume as part of a composed
-build do not match the NuGet packages we would ship. For example, a NuGet
-package may provide both Desktop and .NET Core versions of an asset. A project
-may choose not to building Desktop artifacts if just a .NET Core build was
-requested. In this case, the repository should produce a "partial package" which
-contains only a subset of the assets. This package should however have the same
-identity and version as the full package. We have already added support in
-BuildTools for projects that wish to do this.
-
-## Building Projects
-
-When the "repo api" is implemented fully across all the projects that make up
-.NET Core, we can use it to construct a build graph and start building
-dependencies. In the short term, we'll hard code the layering diagram of our
-repositories into build scripts themselves.
-
-To begin, we'll start by building the `dotnet/standard` repository to produce
-the set of .NET Standard 2.0 reference assemblies. If there are additional sets
-of reference assemblies we'll need during the build, we should introduce a
-`dotnet/reference-assemblies` style repositories which can built from source a
-set of refs that can be used to target other profiles.
-
-After a project has been built we move all of the nupkgs it produced into a
-package fallback location and use the normal NuGet APIs to consume them. In
-addition, we'll use the repo api "change" command to update the versions that
-dependent projects consume.
-
-We continue building projects and updating dependencies until the entire product
-has been built.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Csource-build-orchestration.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Csource-build-orchestration.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Csource-build-orchestration.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Toolset/Overview.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Toolset/Overview.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Toolset/Overview.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Toolset/Overview.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,6 +0,0 @@
-Moved to https://github.com/dotnet/arcade/blob/main/README.md
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CToolset%5COverview.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CToolset%5COverview.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CToolset%5COverview.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Toolset/PublishConsumeContract.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Toolset/PublishConsumeContract.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Toolset/PublishConsumeContract.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Toolset/PublishConsumeContract.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,188 +0,0 @@
-# Toolset Packages
-
-- [Toolset Feed](#toolset-feed)
-- [Core Tools SDK](#core-tools-sdk)
-- [Bootstrapping](#bootstrapping)
-- [Using tools in non-bootstrapping scenarios](#using-tools-in-non-bootstrapping-scenarios)
-- [Onboarding](#onboarding)
-- [Package versioning](#toolset-package-versions)
-- [Package contents](#package-contents)
-- [Package symbols](#package-symbols)
-- [Maestro and the Versions repo](#maestro-and-the-versions-repo)
-- [Gallery](#gallery)
-- [Package validation](#package-validation)
-- [Sdk validation](#sdk-validation)
-- [Provenance](#provenance)
-- [Usage](#usage)
-
-## Toolset Feed
-
-Toolset packages should be published to a consistent location for consumption.
-
-There are currently a couple of different sources for various repo toolsets.
-
-- https://dotnet.myget.org/F/aspnetcore-tools/api/v3/index.json
-- https://dotnet.myget.org/F/roslyn-tools/api/v3/index.json
-- https://dotnet.myget.org/F/dotnet-buildtools/api/v3/index.json
-
-Shared toolset packages will be published to a single location so that consumption / [discoverability](#gallery) is simplified.
-
-Toolset package feed: https://dotnetfeed.blob.core.windows.net/dotnet-tools-internal/index.json
-
-## Core Tools SDK
-
-The core tools SDK is the entry point for toolset functionality. We will provide a core SDK which repo's will consume as an SDK (or package reference) that provides functionality for tasks that are common across repo's. The core tools SDK may contain one or more tools packages which have been determined to be beneficial to a common set of repos (most?) across DotNet. As packages prove valuable to more than one repo, they will be considered for inclusion in the core tools SDK. However, we want to be considerate of package bloat and seek alternative (but common) means of consumption for tool packages which do not meet the critera for inclusion in the core tools SDK. In other words, the packages will need to provide clear benefit to the majority (or all) of repos in order to be considered for inclusion in the core tools SDK.
-
-## Tools packages
-
-Tools packages provide functionality (MSBuild or other) which are useful to one ore more repo's. Tools packages (specifically MSBuild task packages) are currently being [discussed](https://github.com/dotnet/core-eng/pull/2541/files) and will be considered for inclusion in the core tools SDK (if they provide clear functionality to the majority of DotNet repos). Additional tools / task packages will be available for direct consumption or via the core tools SDK.
-
-## Bootstrapping
-
-Bootstrapping a repo will consist of using the CLI (obtainable via a script from a well-known / secure location) to restore the [core tools SDK](#core-tools-sdk) project.
-
-## Using tools in non-bootstrapping scenarios
-
-There are some scenarios where bootstrapping is not ideal for acquiring tools. These are scenarios which are not project based, or not tied to a specific repo. A primary example of this is telemetry, where you want to be able to send information about a build, before a repo has even bootstrapped. Another may be orchestration (depending on implementation), the orchestration may schedule and report on multiple repo's, but itself is not tied to a repo. For these scnearios, we would like to be able to provide common tooling. At this point, there are a couple of ideas being thrown around.
-
-- "DotNet CLI install tools" is one option for local toolset installs, but not available until .NET Core 2.1 Preview 2 (at the earliest).
-- "Shared Library" model (like Jenkins), where tools are provided via another common tools repo.
-- [CBT](https://cbt-userguide/Introduction.html) is a new offering from 1ES. Not enough investigation has occurred to determine if this is a viable option.
-
-We will evaluate guidance for these scenarios when they arise.
-
-## Onboarding
-
-Onboarding a repo to the toolset will be a [simple process](https://github.com/dotnet/arcade/tree/main/Documentation/Project-Docs/buildtools-bootstrap.md).
-
-We will provide links to zips / tarballs to acquire the basic pieces necessary for bootstrapping the core tools SDK on a supported platform.
-
-Note: Further guidance on onboarding a repo and customizing for a particular repo's needs will be provided in a separate documentation. Some general [usage](#usage) is provided below.
-
-## Toolset package versions
-
-Package versioning should follow precedent set by other repo's rather than trying to produce new versioning scheme / tooling. Most of the "core" DotNet repositories (CoreFx, CoreClr, Core-Setup, etc...) are using [versioning](https://github.com/dotnet/corefx/blob/master/Documentation/building/versioning.md) tools which are a part of [BuildTools](https://github.com/dotnet/buildtools/blob/master/src/Microsoft.DotNet.Build.Tasks/PackageFiles/versioning.targets). The versioning logic will be available from a [task package](https://github.com/dotnet/core-eng/pull/2541/files) where it will be generally available for all participating repositories.
-
-### Versioning constraints
-
-- Version needed to be higher than the versions previously shipped.
-- There needs to be an ability to have multiple versions per day.
-- Versions need to be always increasing.
-- Version needs to be lower than 65535 (unsigned short int max) since the version is used as assembly file version which has that constraint.
-- Version needs to be reproducible.
-- We shouldn't have the need to check in a file containing the buildnumber. Checked in files containing major/minor/patch will be permitted.
-- We will support SemVer [1.0](https://semver.org/spec/v1.0.0.html) and [2.0](https://blog.nuget.org/20140924/supporting-semver-2.0.0.html) semantics. If there are issues related to SemVer 2.0 support on older clients, then we'll consider adjusting to support those scenarios.
-
-Package version example:
-
-```Text
-SemVer 1.0: mylibrary.1.0.0-prerelease-00001-01.nupkg
-SemVer 2.0: mylibrary.1.0.0-prerelease.1.1.nupkg
-```
-
-## Package contents
-
-Standard package layout
-
-``` Text
-(root)
- - sdk/
- + Sdk.props (optional)
- + Sdk.targets
- - build/
- + $packageId.props (optional)
- + $packageId.targets
- - netstandard1.5/
- + $taskAssembly.dll
- - net46/
- + $taskAssembly.dll
-```
-
-The standard package layout *supports* (not required) consuming packages as [MSBuild Project SDKs](https://docs.microsoft.com/en-us/visualstudio/msbuild/how-to-use-project-sdk). In general, we believe that there will be one project SDK which is referenced and that the toolset packages will be consumed as package references, not as SDK's. At this time, however, we are not enforcing a strict model which prevents or requires toolset consumption as individual SDK's.
-
-`Sdk.props` and `Sdk.targets` should not contain any functional code, only imports for the respective build props / targets.
-
-### Requirements
-
-- Utilities, exe's, scripts, etc which are part of the package functionality must be usable via MSBuild properties / targets. You should not have a collection of executable files in your package which do not include MSBuild entry points for using them.
-
-- Additional package guidelines are outlined [here](https://github.com/dotnet/arcade/blob/main/Documentation/Project-Docs/Toolshed/TaskPackages.md#implementation-details)
-
-- Packages need to include accountability information in the nuspec. At a minimum, source repository link and commit SHA.
-
-### Package dependencies
-
-The tools provided via NuGet packages for MSBuild tasks will be self-contained (include all of their dependenices). It is important to be deliberate about what dependency versions are included in a package because otherwise the mix-match model of the tools will be broken. As a starting place, dependency versions should align with what is provided by the core tools SDK. If you have additional dependencies outside of those in the core tools SDK (or need to change dependency versions), then we should be deliberate (have a conversation with core tools stakeholders) about what those dependencies are and what versions are required.
-
-### Best practices
-
-- Choose non-generic build property / target names. Packages should be very considerate when defining property / target names. For example, if each package defines a property called `TaskDir` which is defined as `$(MSBuildThisFileDirectory)build/blah`, then the last package imported will be the one to define `TaskDir`, and all of your other packages will be broken. So packages should prefer to choose target / property names which are unlikely to conflict with other packages or which include the package name in the property / targetname, ie `MyPackageNameTaskDir`
-
-- Ensure build props file is imported. In the props file, you should define some property such as `<_MyPackageNameImported>true` and the targets file then includes ``. This would permit consumers to just directly import the targets file if desired instead of importing two files.
-
-## Package symbols
-
-Task package symbols should be embedded in the binaries.
-
-## Maestro and the Versions repo
-
-Toolset packages will assume the use of Maestro for automatic version uptake.
-
-Toolset packages should be publishing version information to the versions repo so that respositories using automatic version updating can consume them. When publishing, there should be package versions entries both for the repo producing the package, and for a tools location which aggregates the various toolset packages. [Details are TBD]
-
-## Gallery
-
-A traditional gallery (ie myget.org) is not provided for the toolset. Instead toolset packages may be browsed using a [package source](https://docs.microsoft.com/en-us/nuget/tools/package-manager-ui#package-sources) in Visual Studio. Additionally, toolset packages will be listed on the versions repo [link TBD].
-
-## Package validation
-
-Currently, there are no unit tests for package validation / conformance.
-
-## Sdk validation
-
-Currently, there are no unit tests for Sdk validation / conformance
-
-## Provenance
-
-Security is continuing to tighten, and we require provenance for any bits that we own / control directly. Provenance guidance / requirements are provided [here](https://securityguidance.cloudapp.net/). It is important to keep these rules in mind for all tools package providing repos.
-
-## Usage
-
-### Core Tools SDK Usage
-
-The core tools SDK will be typically consumed as a [project SDK](https://docs.microsoft.com/en-us/visualstudio/msbuild/how-to-use-project-sdk).
-
-### Tools packages Usage
-
-Tools packages will typically be consumed as package references in an individual repo. The toolset SDK should provide extensibility points to add package references for the toolset which are specific to a repo. If functionality proves to be beneficial to additional repo's, it will go under consideration for becoming part of the core toolset SDK.
-
-[Note: Extensibility points may not yet be present]
-
-Example of common `Toolset.proj`
-
-```XML
-
-
- net462
- https://dotnetfeed.blob.core.windows.net/dotnet-tools-internal/index.json
-
-
-```
-
-[Note: This example should include how to add project specific PackageReferences to the toolset]
-
-Example of tools as SDK's usage (less common usage)
-
-```XML
-
-
- net462
- https://dotnetfeed.blob.core.windows.net/dotnet-tools-internal/index.json
-
-
-```
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CToolset%5CPublishConsumeContract.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CToolset%5CPublishConsumeContract.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CToolset%5CPublishConsumeContract.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Toolset/TaskPackages.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Toolset/TaskPackages.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/Toolset/TaskPackages.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/Toolset/TaskPackages.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,195 +0,0 @@
-Task Packages
-=============
-
-Task packages provides a set of commonly used MSBuild tasks that are not included in MSBuild itself.
-
-## Tasks
-
-Each task package must be self-contained in that it cannot define dependencies on other task packages (see [below](#no-dependencies).)
-Task packages should avoid grouping too many tasks into the same place.
-
-### Reducing duplicate effort
-
-One reason to put tasks into shared packages is to reduce duplication in infrastructure code.
-We currently have multiple implementations of similar tasks in use by .NET Core and ASP.NET Core projects.
-Each is maintaining their own version.
-
-Examples:
- - https://github.com/dotnet/cli/blob/master/build/Microsoft.DotNet.Cli.tasks
- - https://github.com/aspnet/BuildTools/tree/dev/modules/KoreBuild.Tasks
- - https://github.com/dotnet/buildtools/tree/master/src/Microsoft.DotNet.Build.Tasks
- - https://github.com/dotnet/core-setup/tree/master/tools-local/tasks
-
-Some of these tasks have nearly identical behavior but just use different names. Some examples:
-
- - `GenerateFileFromTemplate` / `ReplaceFileContents` / `PreprocessFile`
- - `SetEnvVar` / `SetEnvironmentVariable`
- - `UnzipArchive` / `ZipFileExtractToDirectory`
-
-## Usage
-
-Tasks packages are distributed as a NuGet package using existing NuGet mechanisms. Developers can use them in MSBuild projects in the following ways:
-
-### Sdk element (recommended)
-
-Reference the package as an "SDK" in your MSBuild project. MSBuild 15.6 and up will automatically restore and extract this package.
-
-```xml
-
-
-
-
-
-
-
-```
-
-```js
-// global.json
-{
- "msbuild-sdks": {
- "Microsoft.DotNet.Build.Tasks.IO": "1.0.0"
- }
-}
-```
-
-**Best practice**: although SDK versions can be specified in .proj files, it is recommended to use global.json to ensure the SDK version
-is consistent within a solution.
-
-### PackageReference (pre MSBuild 15.6)
-
-Reference the project as a PackageReference in csproj files. It is strongly recommended to set `PrivateAssets="All"` to avoid this package ending up in generated nuspec files.
-
-```xml
-
-
-
-
-
-
-
-
-
-```
-
-### packages.config (NuGet 2/MSBuild 14)
-
-Use `NuGet.exe install packages.config` to download the package
-```xml
-
-
-
-```
-
-From your MSBuild project, import `Sdk.props` and `Sdk.targets` from the extract package location.
-```xml
-
-
-
-
-
-
-
-
-
-```
-
-## Implementation details
-
-Task packages have this layout
-
-```
-(root)
- - sdk/
- + Sdk.props
- + Sdk.targets
- - build/
- + $packageId.props
- + $packageId.targets
- - netstandard1.5/
- + $taskAssembly.dll
- - net46/
- + $taskAssembly.dll
-```
-
-Packages have the following metadata in their nuspec.
-
-```xml
-
-
-
-```
-
-### No dependencies
-
-MSBuild task packages cannot have dependencies (due to the current design of the NuGet SDK resolver: https://github.com/Microsoft/msbuild/issues/2803).
-
-```xml
-
-
-
-
-
-
-
-```
-
-### Examples
-
-The following are examples of tasks that we would like to build into common shared packages.
-The implementation and naming is still subject to further review.
-This list contains a set of tasks that appear to be commonly used across several repos.
-
-Microsoft.DotNet.Build.Tasks.IO
- - `DownloadFile` - downloads a file.
- - `ZipArchive` - creates a .zip file
- - `UnzipArchive` - unzips a .zip file
- - `GenerateFileFromTemplate` - supports a very simple templating format for key/value substitutions in a file
- - `ComputeChecksum` - computes the SHA256 or SHA512 checksum for files
- - `Chmod` - change Unix permissions
-
-Microsoft.DotNet.Build.Tasks.Git
- - `GetGitCommitHash` - reads the current commit hash from a .git folder without needing git.exe installed
- - `GetGitCommitBranch` - reads the current branch name
-
-Microsoft.DotNet.Build.Tasks.Shell
- - `Run` - like `Exec`, but it handles the complexity of escaping quotes and spaces in arguments
- - `RunDotNet` - like `Run`, but launches a process using the same `dotnet.exe` file used to launch the current MSBuild process. Especially useful for launching .NET Core console build tools
- - `FindDotNetPath` - finds the `dotnet.exe` path on a machine
- - `SetEnvironmentVariable` - sets an environment variable
-
-Microsoft.DotNet.Build.Tasks.NuGet
- - `PackNuspec` - packages a .nuspec file
- - `DownloadNuGetPackage` - fetches a package from a NuGet feed
- - `PushNuGetPackages` - pushes NuGet packages in parallel
- - `ReadNuGetPackageIdentity` - opens a .nupkg file and reads the package ID and version from its metadata
-
-Microsoft.DotNet.Build.Tasks.AzureStorage
- - `UploadBlobToAzure` - pushes a blob to Azure Storage account
-
-### Packages (by team) which should be shared to start
-
-**ASP**
- - Tasks
- - DownloadFile
- - ZipArchive
- - UnzipArchive
- - GenerateFileFromTemplate
- - ComputeChecksum
- - Chmod
-
-**Roslyn/CLI**
- - Repack
- - Signtool
-
- **CoreFx/CoreCLR**
- - BlobFeed
- - VersionTools/Dependency update
- - Repo Tools
- - ILAsm
- - ILLinker
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CToolset%5CTaskPackages.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CToolset%5CTaskPackages.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CToolset%5CTaskPackages.md)
-
diff -Nru "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/V3 Publishing/one-pager.md" "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/V3 Publishing/one-pager.md"
--- "/tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/V3 Publishing/one-pager.md" 2023-10-18 18:08:29.000000000 +0000
+++ "/tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/V3 Publishing/one-pager.md" 1970-01-01 00:00:00.000000000 +0000
@@ -1,55 +0,0 @@
-## V3 Publishing
-We need to retire V1 and V2 publishing.
-
-Why do we need to retire V1 and V2?
-Both V1 and V2 use multi stage(s) publishing infrastructure. V3 on the other hand uses single stage publishing, thereby reducing UI clutter. V3 reduces the number of machines used during publishing, which speeds up the whole process. In both V1 and V2, when new channels are added it requires an arcade update to the customer repository, but in V3 it will only require arcade getting an arcade update.
-
-Currently arcade release/5.0, main and all the repos getting updates from these branches are already using V3 publishing. In this epic we are planning to move arcade release/3.0 branch to use V3 publishing. We need all the repos currently which takes updates from arcade release/3.0 to use the latest V3 publishing. Also removing all the legacy publishing code that includes V1 and V2 publishing from arcade main and release/3.0 branches.
-
-Also will be working on ways to improve the performance of publishing artifacts and symbols, and add more tests during this process. This will include better way of downloading artifacts to improve publishing performance.
-
-## Stakeholders
-- .NET Core Engineering
-- .NET Core Engineering Partners
-
-## Risk
-What are the unknowns?
-- How arcade-services would react to this publishing arcade update, because right now we have special stages in arcade-services compared to other repos which consumes update from arcade/release-3.0.
-- While on-boarding repos to V3, there might be some risk because of some unknown dependency of the repo on V1/V2 publishing.
-
-## Rollout and Deployment
-V1/V2 to V3
-a) We are deprecating legacy publishing code. This functionality will be first tested in arcade main and then in arcade-validation. Upon successful test, since all the repos getting updates from arcade main are currently using V3 publishing, this rollout is going to be seamless. This is just going to be an arcade update and repo owners do not have to do anything here.
-b) Then V3 publishing infrastructure has to be added in arcade/release-3.0 and this will be tested against some repos that takes update from arcade/release-3.0. Upon successful testing, an arcade update will be rolled out which customer repos have to consume.
-c) Make a list of all the repos that will require to update like we did for arcade/release-5.0 eg:(https://github.com/dotnet/arcade/blob/main/Documentation/V3StatusUpdate.md)
-d) Will send out an email to partners to upgrade from V1/V2 to V3 and help them upgrade to V3. Documentation on how to upgrade can be found here (https://github.com/dotnet/arcade/blob/main/Documentation/CorePackages/Publishing.md#how-to-upgrade-from-v2-to-v3)
-e) After all the repos are onboarded successfully, V1 and V2 publishing infrastructure will be deprecated from arcade/release-3.0. This is going to be an arcade rollout which customers repos have to consume.
-
-Performance improvements
-a) All the performance related improvements are going to be an arcade update which customer repos have to consume. This will be tested against runtime, installer before roll out.
-
-## Serviceability
-Testing
-a) While improving the performance of publishing artifacts and symbols, tests will be added to cover downloading artifacts.
-b) While deprecating legacy publishing, some V2 publishing tests will be replaced by V3 publishing tests.
-c) Some tests related to PublishArtifactsInManifest, SettingUpV3Config and Symbol publishing are already in place and can be found here (https://github.com/dotnet/arcade/tree/main/src/Microsoft.DotNet.Build.Tasks.Feed.Tests)
-
-PATs
-a) No new PATs are added as part of this epic.
-
-SDL
-No change to the SDL threat model.
-
-Confidence in deployments/shipping
-a) Before on-boarding repos using arcade/release-3.0 on V3 publishing, a subset of repos will be tested with the latest update, only upon successful test the repos will be on-boarded.
-b) Adding more tests to the publishing infrastructure will increase the confidence.
-
-## Monitoring
-Customers are responsible for keeping their build green once the changes are rolled out.
-
-## FR Hand off
-Publishing FAQs are already in place here (https://github.com/dotnet/arcade/blob/main/Documentation/CorePackages/Publishing.md#frequently-asked-questions), this document can be updated in case of new errors.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CV3%20Publishing%5Cone-pager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CV3%20Publishing%5Cone-pager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CV3%20Publishing%5Cone-pager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/vestigial-objects-onepager.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/vestigial-objects-onepager.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/vestigial-objects-onepager.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/vestigial-objects-onepager.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,122 +0,0 @@
-# "Vestigial objects" one-pager
-
-Parent epic: [Regularly find and delete vestigial objects in .NET Engineering Services Subscriptions](https://github.com/dotnet/arcade/issues/8814)
-
-The goal of this effort is to develop a consistent, approachable method to understand the existing Azure inventory
-
-This effort must answer these specific questions:
-
-- *What resources do we have now?* (Or, what resources exist that are not related to running our services.)
-- *What resources have changed recently?* (Or, Something has broken, have there been any recent configuration changes?)
-
-## Stakeholders
-
-- Operations lead
-- First Responder lead
-
-It is expected the primary audience of this effort are First Responders, for service failure diagnosis, and Leadership, for cost analysis.
-
-## Risks
-
-Risk is low.
-
-All data already exists in Azure, and Azure itself provides almost all of the infrastructure necessary for use. The goals of this effort are supported and expected use of this data.
-
-All Azure data stores involved are Kusto-like, which is a well-understood technology in dnceng.
-
-This effort will also use Grafana and Power BI, which are well-established tools in dnceng.
-
-## The Plan
-
-Azure provides infrastructure and features we can leverage to achieve our goals.
-
-- [Azure Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/overview)
-- [Event Logs](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log) and [Resource Logs](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs)
-- [Azure Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor/overview) and Log Analytics
-- [Resource Tags](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/tag-resources)
-
-The Resource Graph, Event Logs, and Resource Logs are stores of audit and activity data generated by Azure. In some cases, like the Resource Graph, this information is already generated and available directly for query. In other cases, like specific resource auditing, logs must be enabled by a resource owner. Generally, this uses Azure Monitor and is listed as the "Export Log" feature.
-
-### Understanding current inventory
-
-The Azure Resource Graph provides a Kusto-like query interface to all resources in a subscription.
-
-Azure allows "Tags", which are simply key-value pairs, to be arbitrarily associated with almost any resource. Internally, Microsoft already uses this feature to track assets and configuration across subscriptions (e.g., AzSecPack, NRMSException). "Inventory" is an explicit use case supported in Azure.
-
-This effort will develop a set of standard Tags that will be applied to resources. These Tags will identify, at minimum, resources that are necessary for operation of our services. Application of tags should be automated where possible. Resources deployed by ARM or as part of the weekly deployment, for example, will have their deployment process modified to attach these tags with each deployment.
-
-The inverse is also interesting: It will highlight resources that are _not_ necessary for the operation of our services. These will be analyzed and their purpose understood, creating more operational Tags as needed. They may also be deleted to eliminate unnecessary spending.
-
-### Understanding changes to inventory
-
-While the Resource Graph provides a (mostly) static view of resources, the Event and Resource Logs provide a view of changes to a resource.
-
-The specific Log information recorded varies from resource to resource, and a resource can generally be configured to log at different level of details. Coarse logging is enabled at the Subscription level. More detailed, resource-specific logging (for example, which specific secrets are accessed from a Key Vault) may be enabled at the resource level.
-
-This effort shall ensure that, at minimum, creation and deletion of Resources are logged. Additionally, an appropriate level of configuration change shall be captured. This should at least include the existence of a change and who made that change, but may include more detail as desired. We may adjust the level of detail over time, though likely any change will need to be decided deliberately (Changes and other detailed activity can be verbose and are generally partitioned in Resource configurations to manage bandwidth use).
-
-Records between Resource Graph and Event Log may be joined using a correlation ID provided for this purpose.
-
-Once inventory Tags are stable, this data will make plain any new resources created, when they were created and who created them.
-
-## Tasks
-
-- [ ] Define tags schema
- - [ ] Indicating that an asset is an operational asset
- - [ ] Consider deployment information
-- [ ] Modify service deployments to publish inventory tags with deployed resources
-- [ ] Manually add inventory tags to resources not automatically deployed
-- [ ] Configure subscriptions to export to a designated Log Analytics Workspace
- - [ ] Understand implications of Azure datacenter location and ingestion costs
- - [ ] Evaluate the possibility of using deployments to enforce audit configuration
- - [ ] Evaluate including Key Vault auditing information
-- [ ] Develop Dashboards presenting data to expected audience. Consider Grafana and Power BI (or a mix of both).
- - [ ] FR can quickly view changes to an arbitrary resource
- - [ ] New resources or resources with an unknown purpose are identifiable from a dashboard
-- [ ] Develop documentation
- - [ ] On use, targeting expected audience. Where to find information. How to interpret dashboards. Basic, pertinent information on how to interpret the raw Azure data. How to develop and run custom audit-like queries.
- - [ ] For new services, to ensure new resources are properly inventoried and audited
-
-## Ideas discarded
-
-Adopt Azure deployment technologies like ARM, Bicep.
-
-Pros:
-
-- Supports configuration as code
-- Explicit and absolute control over all entities and their configuration within a resource group
-- Re-deployment overwrites any changes back to their expected state
-
-Cons:
-
-- Has no existing pattern within dnceng
-- Would require significant effort initially establishing configuration
-- Not aligned with the team's current operational goals
-
-## Telemetry and Monitoring
-
-This effort is itself telemetry and monitoring. Almost all data exists in and is managed by Azure. There is nothing intrinsic to monitor and thus requires no alerting.
-
-## FR Handoff
-
-FR is expected to be a critical user of audit information (specifically, "recent resource changes"). Their acceptance of the final product is required.
-
-## Appendix: Resources
-
-[Azure resource inventory helps manage operational efficiency and compliance](https://www.microsoft.com/en-us/insidetrack/azure-resource-inventory-helps-manage-operational-efficiency-and-compliance)
-
-[Azure security logging and auditing](https://docs.microsoft.com/en-us/azure/security/fundamentals/log-audit)
-
-[Azure Activity Log event schema](https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log-schema)
-
-[Use tags to organize your Azure resources and management hierarchy](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/tag-resources)
-
-[Explore your Azure resources with Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/concepts/explore-resources)
-
-[Query changes made to resource properties](https://learn.microsoft.com/en-us/azure/governance/resource-graph/how-to/get-resource-changes)
-
-[Azure Resource Graph table and resource type reference](https://learn.microsoft.com/en-us/azure/governance/resource-graph/reference/supported-tables-resources)
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5Cvestigial-objects-onepager.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5Cvestigial-objects-onepager.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5Cvestigial-objects-onepager.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/dotnet-bot-github-service-endpoint.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/dotnet-bot-github-service-endpoint.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/dotnet-bot-github-service-endpoint.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/dotnet-bot-github-service-endpoint.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# DotNet-Bot GitHub Service Endpoint
-
-## Public project
-
-See the [onboarding documentation](https://github.com/dotnet/arcade/blob/main/Documentation/AzureDevOps/AzureDevOpsOnboarding.md#github-connections)
-
-## Internal project
-
-### Internal VSTS service endpoint
-
-The VSTS service endpoint used for communication with GitHub (handles syncing source, setting up web hooks, etc...) is named `DotNet-Bot GitHub Internal Connection`. The PAT scopes used for the service endpoint are slightly different than what is used for the Public project. It should be available for GitHub connections in the [internal project](https://dnceng.visualstudio.com/internal).
-
-Service endpoint name: DotNet-Bot GitHub Internal Connection
-
-### Internal GitHub service account
-
-The `DotNet-Bot GitHub Internal Connection` makes use of the `dotnet-bot` service account. Repo's using the `DotNet-Bot GitHub Internal Connection` must make `dotnet-bot` a [collaborator](https://help.github.com/articles/permission-levels-for-a-user-account-repository/#collaborator-access-on-a-repository-owned-by-a-user-account) (Admin access) on their GitHub repo.
-
-### Internal personal authentication token
-
-The service endpoint uses a PAT with **`repo`**, `user`, and `admin:repo_hook` permissions generated by the `dotnet-vsts-github-b` account. You can find the PAT in `EngKeyVault` in the `dotnet-vsts-github-b-user-repo-adminrepohook-pat` secret.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CVSTS%5Cdotnet-bot-github-service-endpoint.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CVSTS%5Cdotnet-bot-github-service-endpoint.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CVSTS%5Cdotnet-bot-github-service-endpoint.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/helix-job-sender.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/helix-job-sender.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/helix-job-sender.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/helix-job-sender.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,52 +0,0 @@
-# Arcade Helix Job Sender Plan
-
-## The Current State of Things in Arcade
-
-Currently, to send a job to Helix using Arcade, developers need to do the following:
-* Add the `Microsoft.DotNet.Helix.Sdk` package to their `global.json`
-* Send up `scriptrunner.py` as part of a correlation payload if they want to receive XUnit results
-* Create an MSBuild proj file which specifies source, type, build number, queue, and appropriate payloads (both correlation & work item). This is capable of handling single-directory wildcards (e.g. zip up all the directories in directory /xyz).
-* Doing some trickery to output the correlation ID from that project file to an environment variable that can be read by future tasks
-* Create a custom script file that monitors Helix for work item completion and reports whether tests succeeded or failed.
-* Add a variable group to their build which contains the appropriate Helix token secret
-
-Then, during their build they must:
-* Do some work to prep all their test payloads into individual folders.
-* Call `dotnet msbuild` on their MSBuild proj file (regular MSBuild won't work)
-* If they want to send the same items to multiple queues, they must repeatedly call the same proj file while changing the `HelixTargetQueue` variable
-* Trigger their script to wait for Helix to finish
-
-## Where We Want to Go
-
-While devs will still need to prepare their tests to send to Helix, we can improve this experience for them. To do this, we will be creating an MSBuild task included in Arcade's `eng/common` directory. This would all be wrapped in a YAML template for easy inclusion in CI builds.
-
-The current thinking for the future process is as follows:
-
-1. Devs take a dependency on Arcade and the `Microsoft.DotNet.Helix.Sdk`
-2. Add a variable group to their build which contains the appropriate Helix token secret
-3. Devs can choose to either use a YAML template or an MSBuild task (both located in Arcade's `eng/common` directory) to talk to Helix. The YAML template is simply a wrapper for the MSBuild task.
-4. If they choose to use the YAML template, they provide the source, type, queues, and payloads as parameters to the template. One of the template parameters would allow for specifying pre- and post-task scripts to run, which would incorporate the functionality of `scriptrunner.py` today.
-5. The MSBuild task would support multiqueueing and waiting on multiple jobs.
-6. The task would then wait for the job to finish. Optional parameters would be provided for whether failed tests should fail the build or not and whether it should simply fire and forget.
-
-The YAML template is not top priority as devs are familiar with MSBuild tasks. However, having a template would be nice as it provides consistency with YAML.
-
-## Work to be Done
-
-1. Add .zip file payload support to the SDK (assigned to jofortes; estimated 1 hour of work): PR [here](https://github.com/dotnet/arcade/pull/766)
-2. Add multiqueueing support to the MSBuild task using MSBuild batching (assigned to jofortes; estimated 1 day work): PR [here](https://github.com/dotnet/arcade/pull/768)
-3. Add multi-job waiting to the SDK and link it to MSBuild task (assigned to jofortes; estimated 2 days work)
-4. Add pre- and post-task scripting functionality to the SDK and support XUnit result reporting (assigned to alperovi; estimated 2 days of work): PR [here](https://github.com/dotnet/arcade/pull/767/files)
-5. Documentation for use of all this jazz (assigned to jofortes; estimated 1 day of work)
-6. (Stretch goal) Add YAML template wrapper to MSBuild task (assigned to jofortes; estimated 1 day of work)
-
-#### Completion Schedule:
-
-* By end-of-day Fri Sep 14: Items 1 & 2 completed; item 3 in progress
-* By end-of-day Wed Sep 19: Items 3 & 4 completed; item 5 in progress
-* By end-of-day Fri Sep 21: Item 5 completed; Item 6 hopefully completed; in-progress if not
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CVSTS%5Chelix-job-sender.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CVSTS%5Chelix-job-sender.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CVSTS%5Chelix-job-sender.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/pipebuild-feature-history.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/pipebuild-feature-history.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/pipebuild-feature-history.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/pipebuild-feature-history.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,169 +0,0 @@
-# PipeBuild History
-
-## Background
-
-PipeBuild was created out of the need to produce a single unified build (set of binaries) which spanned multiple OS's and architectures. In some instances, architecture was less of a motivating factor, and configuration certainly wasn't a factor; because those aspects could have been done on a single machine or independently in the case of configuration. The ability to parallelize all of these aspects, however, proved tremendously valuable in terms of throughput and certainly leads to more desirable architectural patterns.
-
-Here's the "evolution" of PipeBuild broken into loosely chronologically implemented features.
-
-### Feature 1 - Creating an Orchestrator
-Initially, PipeBuild was very simple. There was a json file which defined build definitions, and one that combined build definitions into pipelines. Being "aware" of a build definition meant associating a name with a VSTS Build Definition ID.
-
-Definitions.json
-
-```
- "Definitions": [
- {
- "DefinitionId": 893,
- "Name": "CoreFx-Windows-Trusted",
- "ProjectName": "DevDiv",
- "BaseUrl": "https://devdiv.visualstudio.com/DefaultCollection"
- },
- {
- "DefinitionId": 1054,
- "Name": "CoreFx-Linux-Native-Trusted",
- "ProjectName": "DevDiv",
- "BaseUrl": "https://devdiv.visualstudio.com/DefaultCollection"
- }
- ]
-```
-
-Pipelines.json
-```
- "Pipelines": [
- {
- "Name": "Trusted-All-Release",
- "Parameters": {
- "TreatWarningsAsErrors": "false"
- },
- "Definitions": [
- {
- "Name": "CoreFx-Linux-Native-Trusted",
- "Parameters": {
- "DockerTag": "debian82_prereqs_2",
- "ConfigurationGroup": "Release"
- }
- },
- {
- "Name": "CoreFx-Linux-Native-Trusted",
- "Parameters": {
- "DockerTag": "rhel7_prereqs_2",
- "ConfigurationGroup": "Release"
- }
- },
- ...
-```
-
-We could then define a named definition, and provide values for variables defined in that definition (via the defined pipeline). When PipeBuild was launched, it would replace any named variables in the definition with the values in the pipeline.json file. Side note: the pipeline.json and definitions.json file all lived in source code next to PipeBuild itself (this very quickly proved to be an unwieldy model). I'll outline some of the immediate pitfalls, but it was a very quickly thrown together solution to a major problem we had so problems were expected.
-
-Cons:
-
-- pipeline definitions next to PipeBuild source code and not in product repos meant a multi-step process for making changes to the build and a synchronization nightmare. It also meant that we started seeing a proliferation of checked in files named "pipelines.json", "pipelines.corefx.json", "pipelines.corefx.1.0.0.json", "pipelines.corefx.1.1.0.json", etc... Additionally, any change to the pipelines ended up being handled by the same person (not a product repo dev but a removed engineering dev) because the process was so disjoint from the source code.
-- We ended up duplicating a lot of data because you would have one pipeline for Release, and one for Debug, and you defined them in the same file but separately. We later made a change to support definition groups so that you could re-use the same set of definitions for a pipeline but provide different variable values to it (Debug vs Release). This cut down the duplication and the (somewhat common) instance where someone would make a change to the Release build but not scroll down through the massive file and make the exact same change to the Debug pipeline.
-- Reliability wasn't great, but it's actually gotten worse since then as our requirements have grown
-- Growing pains. As PipeBuild became adopted across more and more teams, support and maintenance costs skyrocketed
-
-Pros:
-
-- Note the reference to "DockerTag" in the pipeline. This reliance on Docker ended up being a huge win in terms of machine maintenance since we could define one VM image but run our builds / tests on many Linux distros.
-- At the time DotNet Core was moving towards JSON, so the format was familiar and somewhat reasonable by devs.
-- It did the basic job we wanted; ran pipeline jobs in parallel or sequentially, across platforms
-
-### Feature 2 - Definition groups
-
-As mentioned in the cons section of "Phase 1", our primitive json model meant for much duplication when defining a pipeline. The more places a dev has to change code, the more chance of failure. Making pipeline changes generally required a few cycles of: update, run an official build, investigate failures, rinse, and repeat. That cycle could be about 1 - 2 hours depending on the repo. The result was that making any change to the pipeline required at least a day to get right.
-
-We introduced the concept of "Definition groups" which allowed us to define a set of build definitions with default variable values. The definition group could be explicitly referenced with a different variable value which applied to the entire grouping. This was our solution which reduced duplicated code and reduced dev errors considerably. It should be noted that this (like many of the "solutions") was not necessarily the best solution, but it addressed a certain need, and was the kind of thing you do when there is limited investment towards a tool. The major take-away is that eliminating the places where code needs to be duplicated is a HUGE win.
-
-### Feature 3 - Reporting Parameters
-
-Reporting parameters had some positive characteristics and some negative ones. The issue we encountered was that VSTS is keenly adapted towards builds and less (or not at all) tailored towards tests. Some subset of our tests (public unit tests) were a part of our product builds, but to fully test our product across all of the required architectures, we had to create a different thing called Helix. Helix allowed us to take our product build pieces along with our test binaries, and massively parallelize running tests across architectures / platforms /etc. VSTS didn't have a great way to view both our builds and all of our tests in one place, so a new system (Mission Control) was developed that could collate all of the data into one viewable place.
-
-The way data was exposed from VSTS to Mission Control was via Reporting Parameters. Reporting Parameters were variables that our Orchestrator exposed from VSTS to Mission Control so that Mission Control could better identify builds (things like build number, platform, configuration, architecture, url, etc...).
-
-Positives:
-- The data allowed us to provide direct links from Mission Control to VSTS builds with differentiated labels.
-- The data allowed us to group product builds together and also link test results (from Helix) with those builds. Ok, to be fair, some of this was the Reporting Parameters, and some of it was other telemetry that we wired into our orchestrator (like providing a unique guid identifier unifying every product build, test build, and test run to a single orchestrated build instance).
-
-Negatives:
-- I hated Reporting Parameters. It was always an as-needed thing. As our supported build variations increased, we would have to go back and plumb through an update to support the new requirements. ie, all of a sudden we would need to differentiate what was displayed in Mission Control based on configuration or architecture or some other factor. I hated this because it meant that the Product team would expose the requirement to the Mission Control team. The Mission Control team would make the update to their source to support the new parameter, then they would talk to the PipeBuild team (not really a team), and that person would have to go change the pipeline json to support the new parameter. It was terribly inefficient and annoying, and the requirements varied across repos.
-
-### Feature 4 - SkipBranchAndVersionOverrides
-
-When developing a pipeline, we found that there were some build legs that weren't necessarily associated with a repo. This was an intentional thing for our "Publish" build definition which we shared across repos. We didn't want to arbitrarily force that build leg to clone / sync the repo which the build legs were using because every clone is another possible point of failure and we, also, were beginning to focus on performance (builds were slow). Cutting out an additional 20 minute clone / sync step (at the time our repos were not ideally performant) was a huge win. The simple fix was to add a switch ("SkipBranchAndVersionOverrides") that told the build definition to ignore any repo / branch information the orchestrator was providing.
-
-Take-aways:
-- Sharing build definitions is a thing and provides many benefits. These are the same benefits which are widely discussed by any code-reuse proponents
-- Performance is important
-- Reliability is important
-- Most of these take-aways are pretty obvious, but the less obvious one (sharing build definition), shouldn't be overlooked.
-
-### Feature 5 - Cleanup
-
-As we continued to grow in the number of repos that we built and docker based linux variants we supported, we started to see build failures on a regular basis (~ every 3 weeks builds would start to fail across the board) because machine disk space would fill up and VSTS didn't provide the level of cleanup that we required when dealing with hundreds of builds a day in a fixed machine pool shared across products.
-
-To address disk space issues we invested in infrastructure that we could run on every build to cleanup the VSTS agent working directories which contained builds older than a day, and also to cleanup docker images / containers which began accumulating on machines and taking up precious space. The compromise of only deleting day old builds arose because we couldn't cleanup every build (though we wanted to) due to repos failing but holding locked processes that would cause cleanup to fail. The workaround of ignoring those cases wasn't acceptable because it meant that every build we ran reported the same warning of a failed cleanup task and eternally "yellow" builds is not as pleasant as seeing "green" builds.
-
-Cleanup is actually a major thing as every time a machine's disk drive fills, it requires someone to investigate the issue then manually clean it up or enlist the help of DDFUN. It's even worse because machines (obviously) fill up disk space faster under heavy usage which tends to happen when products are preparing to ship and you really don't want to see random infrastructure failures.
-
-### Feature 6 - Checked in definitions
-
-The number of repos and branches (release branches, servicing branches, dev branches) that we started to support for official builds was continually growing. It quickly became clear that supporting a branching code base is an increasingly difficult task. For example: If we had to make a change to the Linux build definitions for a release, we had to go manually update about 15 build definitions. If there was a breaking change to our orchestrator, the number of build definitions that had to be updated was closer to 40 (at the time...). VSTS was slow and this was an extremely time consuming and error prone process.
-
-The more difficult task with definitions, was that, when we would branch for a release, we had to clone all of our build definitions, rename them, update our definitions.json and pipeline.json files, and validate. This was a terrible ordeal.
-
-At some point, we discovered that, via REST API's, we could download the build definition json and put it in our product source repos as code. Moving to this method was a HUGE win when we would branch for a release. It should be noted that we didn't branch our orchestrator along with servicing branches, or the VSTS build definition which ran the PipeBuild orchestrator. We always thought we were going to move away from PipeBuild, so we've lived with this state, but if I had to push for one more feature to add to our current PipeBuild tool, it would certainly be to add both of those pieces as code.
-
-One other minor win we had with checked in definitions, was associating the build definition ID of the orchestrating PipeBuild VSTS definition with the orchestrated build legs VSTS build. Prior to this minor change, it was an engineering feat to determine which of the various PipeBuild definitions representing various servicing releases had scheduled a particular build leg.
-
-Checked in definitions are clearly superior for DotNet teams because of their branching nature, but there are certainly some downsides to our current iteration
-- We never invested in a clean flow for modifying checked in definitions. The JSON dump style works, but it includes a bunch of extra data which devs don't care about, or understand. I wrote a tool that launches a web browser and loads the local JSON into a new build definition so that it can be iterated on via the standard VSTS definition editing process. After making changes via the web UI, you can use the same tool to download the code again locally. The tool was never truly invested in though, and often the code you downloaded from VSTS would look vastly different because of back-end formatting changes which would add additional metadata or change GUIDs (devs never understood Task GUIDs).
-- The JSON build definitions contained too much extra metadata and it was nearly impossible for any dev to manually make a change to the definition with a text editor unless it was changing a variable name / value.
-- Secrets became confusing because they couldn't be defined by the JSON, they were provided by the PipeBuild orchestrator.
-
-### Feature 7 - Conditional build legs
-
-I'll be brief on this topic because VSTS now provides custom conditions. Prior to custom conditions, we had to implement a feature that would skip an entire build leg (we didn't have step / task level access) for some publishing scenarios. Yay custom conditions!
-
-### Feature 8 - Azure Key Vault
-
-Secret management started to become more and more of an issue. Whether it was a PAT expiring or (and I'm guilty of this on one occasion) an unintended secret getting leaked, when you had to update a secret in VSTS, it became a nightmare.
-
-- Multiple product teams owning their own builds meant that the same secret was defined by differently named variables in different repos
-- Multiple devs working on builds meant that sometimes secrets were unintentionally duplicated with different variables
-- You can't read the secrets (duh), so, without careful monitoring, it became nearly impossible to know which variable applied to which secret.
-
-Secrets were defined all over the place! Updating them was a nightmare. Thankfully, we were following the practice of keeping our secrets in Azure Key Vault so that we could retrieve them if necessary; for example, when bringing up a new orchestrated build. There was still no connection between key vault and the VSTS build definition, so if we had to update a secret it fell on one of two or three devs that were familiar with the system to go through and manually update all of the numerous build definitions.
-
-Our solution was to build in azure key vault access to our build orchestrator. Rather than keeping secrets in VSTS build definitions, we kept plain text values like this...
-
-```
-[AzureKeyVault=EngKeyVault,SecretName=dn-bot-devdiv-build-rw-code-rw]
-```
-
-When PipeBuild saw a variable with a text value looking similar to the above, it would connect to Azure Key Vault and retrieve the specified secret from the specified vault. After this change, there was only one secret (the access Azure Key Vault secret) in every PipeBuild VSTS definition, and all of the rest of the secrets were plain-text encoded values. Devs could reason about what a secret was, and where to find its value. Cycling a secret now meant updating the value in Azure Key Vault and every build definition would just continue to work.
-
-On the down side, individual build legs still have no connection to the azure secret values, they just have null values in the JSON marked as secret. So, you still have to know to go look at the PipeBuild VSTS definition to figure out what the secret value is.
-
-### Things we never solved
-
-As a reminder, the .NET Core Engineering team never wanted to own a build orchestrator. It was a product which was created to fill a need which VSTS (and other solutions) weren't providing. The tool worked (with some pain along the way), but since it was always viewed as a temporary solution, it never was fully funded or treated with the intent of being a top notch piece of infrastructure. That mindset meant that many features which would have made our lives much easier, were added late, or never implemented. Here is an additional list of some of the features that we always wanted to implement but never got funded.
-
-- PipeBuild definition not checked in. The PipeBuild orchestrator is a tool that is launched via a VSTS build definition. While individual build legs are defined by checked in JSON code, the orchestrating definition is not. PipeBuild does not fork with the code and neither does the launching VSTS build definition.
-
-- PipeBuild was never versioned. Every official build in every branch of every product, uses the same code base (HEAD). That meant breaking changes were not possible though they happened on occasion at great expense. Sometimes a breaking change wouldn't be discovered to have occurred until a dormant servicing branch was spun up to produce a servicing fix. The cost at that point is prohibitive both because spinning builds in a servicing branch can be difficult, and because knowledge of how that branch worked could be lost.
-
-- Clean PipeBuild output. Our PipeBuild output just periodically queries VSTS for build status and dumps the output to the UI console. Tracking down a failing build leg while the build was in progress was difficult as its status would just scroll off the screen if you weren't diligent. If you waited until the build completed, you would get a dump of the failing build legs, but you had to scroll to the bottom of a lengthy output and doing this on a mobile device was an exercise in extreme patience.
-
-- Dev workflow for modifying definitions. Checked in definitions were great, but never fully supported after implementation. It was difficult to reason about the JSON definitions, and editing them required every dev to ping me for access to the hacky tool I had written and mentioned above. Merging two JSON blobs representing different VSTS api versions was very arduous.
-
-- Test changes to PipeBuild code path. To this day, there is no great way to test changes to our official builds without actually merging those changes and scheduling an official build. The current "best" work around, is to clone a definition, disable official build logging, change the title, and figure out some way to disable publishing (remove the publishing leg, change the publishing endpoint, or change the build number format to ensure there is no package version conflict when publishing to MyGet).
-
-- Shared Libraries for build orchestrator. For good and bad reasons, shared infrastructure libraries are currently available via DotNet BuildTools. BuildTools provides some benefits, but there are a lot of current concerns over the tooling. Discussing the concerns with BuildTools is an entirely different effort.
-
-- Replayability. Reliability has been (and continues to be) one of the most outstanding (not in a good way) issues that we have with respect to builds. There has been a tremendous effort to invest in our infrastructure so that it is resilient to intermittent network issues. In fact, we have been so busy fighting intermittent network issues, machine issues, disk space issues, etc... that we have not had time to invest in recovering from failures. We create a lot of builds for a single orchestrated build, and failures occur regularly. Today, we're forced to hotfix the failure (if possible) and then respin an entire build. This means that any failure requires at least a two hour reset to attempt to get a clean build. Multiplying that single repo concept across an entire orchestrated product (requiring many repos to successfully build) can lead to a nearly impossible task of getting 6 repos to build cleanly and understanding that any repo failing will likely lead to a day delayed trying to get another build. Re-entrant orchestration where one failing repo could be replayed without resetting an entire orchestrated build would be monumental. Barring that, even having replayability so that a single repo is not forced to entirely reset would provide benefit.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CVSTS%5Cpipebuild-feature-history.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CVSTS%5Cpipebuild-feature-history.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CVSTS%5Cpipebuild-feature-history.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/signed-dnceng.visualstudio.com-builds.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/signed-dnceng.visualstudio.com-builds.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/signed-dnceng.visualstudio.com-builds.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/signed-dnceng.visualstudio.com-builds.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,24 +0,0 @@
-# Signed dnceng.visualstudio.com builds
-
-Dnceng.visualstudio.com does not have support for signed builds.
-
-Code should still be mirrored to dnceng.visualstudio.com/internal as outlined in the [Azure DevOps Guidance](https://github.com/dotnet/arcade/blob/main/Documentation/AzureDevOps/VSTSGuidance.md#projects).
-
-## Task based build definitions
-
-If your build definition is task based, then the build definition for signing should be created in devdiv.visualstudio.com with an "External Git" source which references the dnceng.visualstudio.com/internal git repository
-
-1. Select a source: External Git
-2. Change the Connection to "New Service Endpoint"
- - User name: dotnet-bot@microsoft.com
- - Password / Token Key: Listed in "EngKeyVault" as "dn-bot-dnceng-build-rw-code-rw"
- - You can get Read access to "EngKeyVault" by joining the "DncEngKvRead" [security group](https://idweb/identitymanagement/aspx/groups/AllGroups.aspx)
- - Note: It may take a few hours for permissions to propagate
-
-## Yaml based build definitions
-
-If your build definition is yaml based, then the build definition for signing should be created in devdiv.visualstudio.com, but your code should **also be mirrored into devdiv.visualstudio.com** and the DevDiv Git source should be used for building. Yaml is only supported for source code from the same project or from specific providers (like GitHub), it is not supported for source code from an external Git source (or other project collection).
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CVSTS%5Csigned-dnceng.visualstudio.com-builds.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CVSTS%5Csigned-dnceng.visualstudio.com-builds.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CVSTS%5Csigned-dnceng.visualstudio.com-builds.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/vsts-preview-versus-pipebuild.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/vsts-preview-versus-pipebuild.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/vsts-preview-versus-pipebuild.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/vsts-preview-versus-pipebuild.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,79 +0,0 @@
-I sat down with the intent of prototyping an orchestrated build of a single repo using the new VSTS "YAML definitions" preview feature. When I started writing this doc, I intended to just cover my experience, but I realized that it's difficult to understand why some things bothered me without giving some background about where our build orchestration efforts began and where they are now. Given the early stage of the feature, it was obvious that I would encounter some hiccups, and it wasn't clear just how close to a working prototype I would get. What follows is a high level overview of my initial reaction to the VSTS Preview Features in their current state and then my notes related to the YAML definitions and how that experience deviates from our current orchestrated build solution (PipeBuild). For additional context, there's a companion document I wrote which covers a brief [history of PipeBuild](pipebuild-feature-history.md) and the more impactful features we added to it as our needs evolved.
-
-For context, when I say, the "YAML definitions" preview feature, I'm including the VSTS features which permit configuration as code (different than checked in build definitions), and the VSTS implementation of a Pipeline (orchestrated repo build). From my limited experience, there is no current "Product orchestration" feature which would support orchestrating multiple repository spanning builds. You could certainly design something within the current feature set to support product orchestration, but it's not a fully supported first-class feature and it would be a bit of an unmanageable mess.
-
-# VSTS Preview Features
-
-Before I dive into my experience, I wanted to briefly cover the involved VSTS Preview Features and my initial take on them.
-
-## Config as Code
-
-"Config as code" is not the same as a "checked in build definition". In PipeBuild, we take all of the json that defines a VSTS build definition, download it, and check it in to source. In "YAML definitions", you take only the "configuration" parts that define a build definition and check them in as code. I think this abstraction is positive. The abstraction allows us to store just the data we care about and want to manipulate. There's a lot of pieces to a "checked in build definition" that we store that we just don't care about (or understand). On the server side, they have meaning, but to us it's just ignored data which gets in the way when we're trying to understand the data we're looking at and how we want to change it. It also means, I would hope, that any changes to build definitions would be seamless to "YAML definition" users. I think that, long-term, if we had decided to continue investing in PipeBuild, this is the direction we would have gone. It was always on our radar, as a next step, but checked in build definitions filled our immediate need and there was never a desire to further invest in PipeBuild.
-
-**Concerns:**
-- One primary concern is what guarantees we have that the API is not going to change and force us to make widespread updates to all of our various checked in "config as code" files. I don't see any way to specify a specific api version. At the moment, the API is in constant flux. Our previous history with VSTS has shown that this can be problematic if the agent API's change. Rolling forward should be an intentional / validated process (think Maestro PR's for dependencies but with API versions).
-
-## YAML
-
-I had no experience with YAML before playing around with "YAML definitions". It's not terribly difficult to pick up. There's less than an hour of documentation to read about the language specification, and then it's mostly understanding VSTS' schema implementation (which is fairly well documented though constantly changing).
-
-**Concerns:**
-- Why another data format? We've built up a lot of infrastructure around JSON (mostly deprecated now), then MSBuild, and now YAML. I understand that MSBuild is not the perfect language format for many reasons, but history has shown it to be resilient and Microsoft (.NET Core) has decided to invest in that direction in spite of convincing arguments for other data languages (JSON). History has shown that we are stronger when staying within the Microsoft technology ecosystem.
-
-There are numerous examples where we tried to break away from Microsoft technology (for very valid reasons). .NET CLI put a large investment in using project.json's for package references then eventually transitioned to MSBuild. .NET Core moved to Jenkins for CI (and peripherally for official builds), but now is moving towards Microsoft VSTS. I'm certain that there are some counter-examples to this where moving away from our own products has been a long term win, like moving to GitHub for source control vs TFS. Though, even in that instance we are moving to Git technology but Microsoft hosted source control.
-
-Using yet another format means more investment, particularly in places where there is an intersection of data sharing between builds and code. I'd be curious to know what motivated YAML as the format of choice.
-
-## Pipelines
-
-The Pipeline functionality (scripted steps, parallel builds, matrix definitions) seems to be a reasonable implementation which covers much of the orchestration required for a single repo build. I have some feedback on current implementation details, but overall the model makes sense and its representation in the VSTS UI is fairly clean. It would be nice if phase dependencies were a bit more clearly defined. ie, currently, there is no way to visually determine which dependency is blocking a phase from running.
-
-**Concerns:**
-
-- It's not clear to me if product orchestration is intended to be a fully supported feature of VSTS or up to product teams to implement via Pipelines. If it is left to product teams, then the infrastructure investment on each team could be quite large given that I don't know how the current feature set would support re-entrant orchestration or assist in producing builds reliably (network failure retries).
-
-# PipeBuild development
-For additional background and context, please see [pipebuild-feature-history.md](pipebuild-feature-history.md). Note that the linked doc does not specifically list features which VSTS doesn't support, it's just additional context. Some of the features mentioned there have already been solved by VSTS, others have not.
-
-# VSTS Prototyping Preview Features
-Much of my experience is negatively skewed by the fact that the development work is still in progress. I do think that the experience I had, however, could prove valuable in moving the system from an effective system, to a slightly more user-friendly system. I don't intend to prescribe implementations here, only to cover my experience with VSTS Preview Features from the perspective of someone transitioning from PipeBuild.
-
-## Hello World
-My initial step in creating a YAML definition was to create a simple "Hello World" YAML file. This was a pretty straight forward task and quickly accomplished. What I immediately realized, though, was that, for more complex tasks, the model of coding locally, checking in, pushing to Git, and then scheduling a VSTS build to validate was not efficient.
-
-## Dev Loop
-In considering how to improve my development loop, I stumbled upon the VSTS Agent source and the "--runLocal" option. This seemed like precisely what I needed to speed up my investigation. I installed an agent locally from the VSTS page, then I was able to test my YAML files on my dev box. Here, I made a couple of bad assumptions. Some of the features in the documentation didn't work, so I assumed that the agent I downloaded from the page wasn't current. I cloned the agent source, built it, and updated the agent binaries I was using to Latest.
-
-> Note: I was pleasantly surprised to see that the build cmd file restored an sh.exe which was then used to pass build commands to the build sh file. The cool result was that the cmd file was just a wrapper around the sh file on Windows instead of duplicating a Windows and a Linux variation of the build script.
-
-Even with the latest code, the agent would complain about the schema of my YAML file and I made the bad assumption that the schema documentation was out of date and that the agent source code was the source of truth. I was able to attach a debugger to the agent and reason through the schema it expected so that I could produce a reasonably complicated YAML file.
-
-It turns out that the agent source code was not the source of truth, though neither was the documentation. Some version of the source code which was running on the server was the source of truth, the documentation was a close second (with a few days lag), and the agent was just a source of confusion. Regardless, it didn't stop me from making progress locally and I didn't realize this was an issue until the next day when I moved to testing on the server so that I could run jobs across platforms. Using the VSTS UI to validate schema changes in YAML is a horrific experience.
-
-## VSTS versus PipeBuild gaps
-So, where does current VSTS functionality not quite close the gap to what we're accustomed to with PipeBuild?
-
-Here are some items that we must have in order to start using VSTS preview for our builds.
-
-- Azure Key Vault - currently not supported in the YAML schema I previewed though Chris Patterson has told me this is now supported via a task.
-
-- Templates - This feature was deprecated in the prototyping I was doing, but it would allow you to import YAML from another file into your pipeline. Support for this will definitely make for a cleaner (less error-prone) code-base.
-
-- Agent pools - Currently YAML definitions are only enabled in the Hosted agent pools. That means, we can't actually build our product because the machines in those pools don't have the necessary pre-requisites installed.
-
-- Build number format. It doesn't appear that there is (currently) a way to control the build number format via YAML. The documentation hints that this is on the radar very soon, https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted-features.md. Why does this matter to us? Currently, our PipeBuild VSTS definition will provide a specifically formatted build number which is then parsed by a library to produce an "OfficialBuildId". That OfficialBuildId is passed to every build leg so that generated binary and package version numbers are consistent for a build. An inability to control the build number format via YAML isn't necessarily a regression from what PipeBuild is doing today, but it does mean that we lose some of the benefit of config as code because anybody pointing at a YAML file will need to specifically know to go update their VSTS build definition to produce the proper build number format.
-
-- Telemetry - How do we report status to Mission Control? or elsewhere?
-
-These are additional items which it would be great to utilize, but we could work around if they're not present:
-
-- Docker - Docker is on the radar for support (https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/runtaskindocker.md), but I was unable to get this feature to work using the "Hosted Linux Agent" machine pool which supports YAML definitions. Having this as a supported task is fantastic, once it works... In the interim, we could implement functionality using similar semantics to how we build for docker today.
-
-- Tasks - At the moment, using any task, or understanding how to implement a task in your YAML schema is extremely difficult. Chris Patterson says that there is work in progress (shipping m126 [aka next week]) which will allow you to configure a task or definition in the UI and right-click to copy it to YAML.
-
-- Cleanup - I don't know if this is yet a priority or if we continue to use our infrastructure to clean agents / docker.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CVSTS%5Cvsts-preview-versus-pipebuild.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CVSTS%5Cvsts-preview-versus-pipebuild.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CVSTS%5Cvsts-preview-versus-pipebuild.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/vsts-windows-connection-instructions.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/vsts-windows-connection-instructions.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Project-Docs/VSTS/vsts-windows-connection-instructions.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Project-Docs/VSTS/vsts-windows-connection-instructions.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-# VSTS Windows Azure VM agent connection instructions
-
-1. Go to https://resources.azure.com/subscriptions/84a65c9a-787d-45da-b10a-3a1cefce8060/resourceGroups/dnceng-build-agents/providers/Microsoft.Network/loadBalancers/LB-helixage/inboundNatRules
-
-2. Machines are named as “helixage_[virtual machine #]” so look at the end of the `"name"` property which references the virtual machine # you care about.
-
- Example:
- ```JSON
- {
- "name": "LoadBalancerBEAddressNatPool.0",
- }
- ```
-
- This entry represents “helixage_0”
-
-3. In that item, find the "port #" from the `"value.properties.frontendPort"` value
-
-4. Connect to the machine: `mstsc /v: dnceng-helix.westus2.cloudapp.azure.com:[port #]`
-
- a. Username: dotnet-bot
-
- b. Password is available from **HelixProdKV** as *HelixVMAdminPassword*
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CProject-Docs%5CVSTS%5Cvsts-windows-connection-instructions.md)](https://helix.dot.net/f/p/5?p=Documentation%5CProject-Docs%5CVSTS%5Cvsts-windows-connection-instructions.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CProject-Docs%5CVSTS%5Cvsts-windows-connection-instructions.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/PublishConsumeContract.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/PublishConsumeContract.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/PublishConsumeContract.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/PublishConsumeContract.md 2023-11-13 13:20:34.000000000 +0000
@@ -53,7 +53,7 @@
## Onboarding
-Onboarding a repo to the toolset will be a [simple process](https://github.com/dotnet/arcade/tree/main/Documentation/Project-Docs/buildtools-bootstrap.md).
+Onboarding a repo to the toolset will be a [simple process](https://github.com/dotnet/dnceng/tree/main/Documentation/ProjectDocs/buildtools-bootstrap.md).
We will provide links to zips / tarballs to acquire the basic pieces necessary for bootstrapping the core tools SDK on a supported platform.
@@ -106,7 +106,7 @@
- Utilities, exe's, scripts, etc which are part of the package functionality must be usable via MSBuild properties / targets. You should not have a collection of executable files in your package which do not include MSBuild entry points for using them.
-- Additional package guidelines are outlined [here](https://github.com/dotnet/arcade/tree/main/Documentation/Project-Docs/Toolshed/TaskPackages.md#implementation-details)
+- Additional package guidelines are outlined [here](https://github.com/dotnet/dnceng/tree/main/Documentation/ProjectDocs/Toolshed/TaskPackages.md#implementation-details)
- Packages need to include accountability information in the nuspec. At a minimum, source repository link and commit SHA.
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Staging-Pipeline/making-and-validating-changes.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Staging-Pipeline/making-and-validating-changes.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Staging-Pipeline/making-and-validating-changes.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Staging-Pipeline/making-and-validating-changes.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,68 +0,0 @@
-# Making and Validating Changes to the Staging Pipeline
-
-Need to edit the staging pipeline for some reason? This doc has you covered.
-
-## Making Changes
-
-The staging pipeline ([Stage-DotNet](https://dev.azure.com/dnceng/internal/_build?definitionId=792)) is fairly easy to make changes to.
-The pipeline has two main components: its [YAML](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Feng%2Fpipeline&version=GBmain&_a=contents) and the [Release CLI/Library C# code](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Fsrc%2FMicrosoft.DotNet.Release&version=GBmain&_a=contents).
-
-The Release CLI makes extensive use of dependency injection.
-New changes may need to also modify [`ServiceCollectionExtensions.cs`](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Fsrc%2FMicrosoft.DotNet.Release%2FMicrosoft.DotNet.ReleaseCli%2Fsrc%2FServiceCollectionExtensions.cs&version=GBmain) in order to have their dependency injection work properly.
-Additionally, any new operations will need to be added to [`Program.cs`](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Fsrc%2FMicrosoft.DotNet.Release%2FMicrosoft.DotNet.ReleaseCli%2Fsrc%2FProgram.cs&version=GBmain) in alphabetical order.
-
-The [Signing Extensions](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Fsrc%2FMicrosoft.DotNet.Release%2FMicrosoft.DotNet.Signing.Extensions&version=GBmain) project is where all of our signing-related tasks live. If you need to make changes to the signing setup, modify this project.
-
-The Release CLI, Release Library, and Signing Extensions already have tests alongside their code.
-Any new functionality should have test coverage added to it.
-
-## Validating Changes
-
-So you've recently made a change to the pipeline and you want to make sure you're not going to break anything?
-Good for you! Lucky for you, we (the authors of this document) have done some work to make that easier for you.
-
-### Stage-DotNet-Test
-
-This pipeline is your best friend when it comes to validating changes in the staging pipeline.
-It has a separate entry point from Stage-DotNet ([`staging-test-pipeline.yml`](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Fstaging-test-pipeline.yml&version=GBmain&_a=contents) vs. [`staging-pipeline.yml`](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Fstaging-pipeline.yml&version=GBmain&_a=contents)),
-but otherwise uses the exact same YAML templates and files past that entry point.
-A few notable changes from the staging pipeline to the staging test pipeline:
-
-* Stage-DotNet-Test has a BAR ID prefilled for you – we've created a build that is known to work.
-Note that when we say "work," we don't mean "entirely green"; you will still notice some orange circles in the validation steps.
-However, all the pipeline functionality itself will work perfectly for you.
-* Stage-DotNet-Test skips over approval stages – no need to babysit it!
-* Stage-DotNet-Test tests publishing by actually publishing to temporarily created feeds and containers,
-so any changes to publishing will actually be validated!
-
-Simply run the test pipeline on your branch and then wait six hours for it to finish and you'll be golden!
-
-### What? Six Hours?
-
-Okay, okay. If you have to iterate rapidly for some reason,
-there is a way to save time and test only a particular stage or set of stages.
-
-1. Find a previous, successful run of the test pipeline. Copy the Build ID (the bit after `buildId=` in the URI).
-2. Pull up the YAML file for the stage(s) you want to run. Add the following inputs to any `DownloadPipelineArtifact` tasks:
-```yaml
- source: 'specific'
- project: '7ea9116e-9fac-403d-b258-b31fcf1bb293'
- pipeline: 799
- preferTriggeringPipeline: true
- runVersion: 'specific'
- runId: the build ID copied from earlier
- allowPartiallySucceededBuilds: true
- allowFailedBuilds: true
-```
-Note: if you want to use artifacts from Stage-DotNet instead of Stage-DotNet-Test, set `pipeline` to `792` instead.
-
-3. Open up [`eng/pipeline/stage_dotnet.yml`](https://dev.azure.com/dnceng/internal/_git/dotnet-release?path=%2Feng%2Fpipeline%2Fstage_dotnet.yml&version=GBmain&_a=contents) and comment out all of the stages prior to the one you're testing.
-Then make sure to comment out the dependencies on those stages.
-4. Running the pipeline now will skip the most time-consuming stages and go straight to the stage you want to test.
-
-Please still make sure to run the full test pipeline before checking in.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CStaging-Pipeline%5Cmaking-and-validating-changes.md)](https://helix.dot.net/f/p/5?p=Documentation%5CStaging-Pipeline%5Cmaking-and-validating-changes.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CStaging-Pipeline%5Cmaking-and-validating-changes.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Staging-Pipeline/running-the-pipeline.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Staging-Pipeline/running-the-pipeline.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/Staging-Pipeline/running-the-pipeline.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/Staging-Pipeline/running-the-pipeline.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,31 +0,0 @@
-# Running the Staging Pipeline
-
-Running DotNet-Stage may seem intimidating at first, but fear not!
-It's only a little bit complicated.
-
-## Parameters
-
-The actual part you need to understand before running is the pipeline parameters. Here's a full description of them:
-
-1. **Bar Build Ids** – a comma-separated list of BAR IDs to use as the basis for the release. These should be installer BAR IDs.
-2. **Treat build as...** – optionally override project as public or internal. If you know you want it to publish to public feeds/containers, you choose public; if you want to publish to internal feeds/containers, you choose internal. Normally, "default" should be selected.
-3. **Security Release** – should be checked if this is a security release.
-4. **Release Date** – corresponds to date of actual release (not today's date).
-5. **CVE List** – if Security Release is checked, this should be filled out with the relevant CVEs fixed by the release. CVEs should be listed in the format `CVE--`.
-6. **Certificate Substitutions** – not necessary in most cases. If you want to sign Windows files with a different certificate, you specify "{old certificate}={new certificate}" in this box to replace {old certificate} with {new certificate}.
-7. **Always Download Asset List** – used for partial releases. Makes sure that we download that file which is required for releases. Leave as default for most cases.
-8. **Skip Publish to vsufile** – only used for testing purposes.
-
-If you want to only run up to a certain stage, you can disable all later stages using the **Stages to run** feature of Azure Pipelines.
-
-## Failures
-
-There are several manual overrides for stages which are allowed to have failures – these stages require approval if the stage they're related to had any failures. For information on common causes of failure in these stages, check our [validation documentation](https://github.com/dotnet/arcade/blob/main/Documentation/Validation.md#what-do-i-do-if-an-issue-is-opened-in-my-repository).
-
-## Branching for Arcade Release
-
-Dotnet-Release should be branched on the same cadence as Arcade. Check the [Arcade Servicing doc](https://github.com/dotnet/arcade/blob/main/Documentation/Policy/ArcadeServicing.md) for more information on when branching occurs.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CStaging-Pipeline%5Crunning-the-pipeline.md)](https://helix.dot.net/f/p/5?p=Documentation%5CStaging-Pipeline%5Crunning-the-pipeline.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CStaging-Pipeline%5Crunning-the-pipeline.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/availability.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/availability.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/availability.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/availability.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,30 +0,0 @@
-# Hybrid Availability Expectations
-
-## Introduction
-Please note that this guidance is West Coast centric as Fargo and Prague are coming up with their own guidelines.
-
-## Core Hours (e.g. hours that folks must be available):
-Strictly speaking, there are no “core hours” as this goes against the spirit and intent of hybrid. However, see next sections on meetings and first responder duties.
-
-## Meeting Availability:
-Meeting (remote and in person) attendance is expected from time to time. As per our async principles, a focus should be put on the value of any meetings scheduled so we can better respect the different time zones and hybrid work patterns. Regarding timing, meetings fall into three categories:
-- Meetings which include the Prague team should generally be scheduled from 8:30am PST until 10am PST, and no later than 11am PST on exceptional cases.
-- Meetings which include only folks from the continental US should generally be scheduled from 10am PST until 2pm PST.
-- V-Team meetings have no set timeframe and is up to the v-team themselves.
-
-## Days in Office:
-There is no expectation that there be a set schedule for days in the office as we’re emphasizing flexibility.
-
-Optional: Add your in-office days to your Teams' status
-
-## First Responders:
-As a team we have a commitment to respond/help our customers during a certain time frame. Given this, it’s important that each dev is available while on FR rotation according to whatever is worked out by the FR lead.
-
-## Impact instead of Time:
-Each of us should be focused primarily on delivering the right impact according to our role and level. The assumption is that it’ll likely take 40(ish) hours a week to have the right impact – but this priority order is important. The implication is that there should be flexibility such that we’re able to employ the rhythms that work best for us, leaving ample time/opportunity for family, personal health, well being, as well as work.
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cavailability.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cavailability.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cavailability.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/azdoworkitemguidance.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/azdoworkitemguidance.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/azdoworkitemguidance.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/azdoworkitemguidance.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# Creating Azure Boards WorkItems
-
-For some issues, we will need to open Azure Boards workitems, rather than github issues. These include:
-
-* Security related changes (see [issue tracking guidance](IssueTrackingGuidance.md))
-* Any work for non-open source projects
-
-In these instances, we still want to have issues, and we still want to link them to Epics that are in arcade.
-
-## Opening an Azure Boards WorkItem
-
-* WorkItems should be opened in the [internal Azure DevOps project](https://dev.azure.com/dnceng/internal/_workitems/)
-* When creating a new work item, create the work item as a [task](https://dev.azure.com/dnceng/internal/_workitems/create/Task)
-* Set Area to internal\Dotnet-Core-Engineering
-* Give it a meaningful title and description
-* Update the GitHub Friendly Title and GitHub Friendly Description with information that can be shared on GitHub (in the public).
-* Add a link to the GitHub epic in the Epic Issue field.
-* After [#8567](https://github.com/dotnet/arcade/issues/8567) is complete, an issue linking your newly created Azure Boards work item will be created and added to the Projects (beta) board
- * The epic issue field will be filled out if the GitHub link was supplied
- * If the GitHub Friendly Title is set, the created github issue will use it. Otherwise, will use "Azure Boards Issue #[Issue Number]".
- * If the GitHub Friendly Description is set, it will be used and a link to the Azure Boards work item will be added. Otherwise, the issue will only have a link to the Azure Boards work item.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cazdoworkitemguidance.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cazdoworkitemguidance.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cazdoworkitemguidance.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/BestPractices.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/BestPractices.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/BestPractices.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/BestPractices.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,24 +0,0 @@
-# Best Practices for .NET Engineering Services
-
-The intent of this document is to help the team learn and grow by sharing best practices that we have found.
-
-## Keeping Our Repos Healthy and Ready for Roll Out
-- Anyone who checks in a change still needs to monitor the next main run, in any repo.
- - PR validation is not the same as deployment to the staging environment and there will always be problems missed by PR validation unless we deploy an entire environment for every PR, which is not currently possible.
- - The goal is to have a vendor monitoring our important pipelines - [Helix Machine Lifecycle Daily Process](https://dnceng.visualstudio.com/internal/_wiki/wikis/DNCEng%20Services%20Wiki/952/Helix-Machine-Lifecycle-Processes?anchor=daily%3A) - but everyone on the team should still make sure we are able to roll out at any time.
- - It’s a good principle to ask people to look at the next main run, it’s an even better one to not allow oneself to be broken for days at a time unnecessarily.
-- Verify deployment and close issues you placed in the "waiting for rollout" column on our Project Board
- - This is especially true for anything associated with grafana alerts. We may miss new alerts as they are concatenated to an existing issue.
- - It is not the responsibility of the individuals performing deployments to verify your issue is complete and closed out.
-- The autoscaler is quite different from everything else in dotnet-helix-machines
- - It is the only service within this repo and it causes us to duplicate any efforts involving Service Fabric changes
-
-## First Responder/Operations Work
-- The Operations v-team - including our vendor resource - is responsible for triaging any internal work that come in from S360 and other internal notifications (i.e. emails from security, policy notifications from PM, etc). Any work that they determine as meeting the First Responder bar will be tagged for FR and be addressed by that virtual team.
-- Current First Responder responsibilities, best practices and how to documentation can be found at our [Team Wiki](https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/889/Home)
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5CBestPractices.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5CBestPractices.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5CBestPractices.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/documentationguidelines.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/documentationguidelines.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/documentationguidelines.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/documentationguidelines.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-# Documentation Guidelines
-
-All documentation should be added to the `dotnet/arcade` repository, [Documentation folder](https://github.com/dotnet/arcade/tree/main/Documentation). Please follow any procedures for adding new documentation to this folder as you would if you were adding the documentation to the soon-to-be-deprecated `dotnet/core-eng` repo. (For example, following the procedure in the One-Pager template regarding where project one-pagers should live.)
-
-**NOTICE**: Use your best judgement to exclude documentation that may have security implications. When in doubt, talk to the team or use placeholders in your pull requests to add documentation as not to accidentally expose sensitive information.
-
-If you have documentation that contains sensitive information that external-to-Microsoft folks should not have access to, then that documentation should be linked to from the public documentation and it should be placed in the Azure DevOps dnceng/internal wiki.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cdocumentationguidelines.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cdocumentationguidelines.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cdocumentationguidelines.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/happyhour.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/happyhour.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/happyhour.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/happyhour.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,17 +0,0 @@
-# Hybrid Happy Hours
-
-- Happy Hours should be both Teams and physical meetings. A nearby conference room should be booked for as many recurrences as possible.
- - The meeting owner should have calendar reminders to make sure they re-book the conference room for the next batch of recurrences every few months.
-- Just like regular meetings, bringing laptops to Happy Hours is encouraged. This facilitates better face-to-face interaction with people who are participating on Teams.
- - Coincidentally, it's also useful for Jackbox.
-- Any games projected on the screen should also be screen-shared on Teams.
- - Bring a capture card for console games (if possible; Jon owns one).
- - Games should ideally allow remote participants to play as well; Jackbox games are a great example of this. However, it's totally okay to sometimes play something that's
- not entirely remote-friendly (look, Jon really likes destroying everyone at Mario Kart), but even then, make sure it's screen-shared.
- - If a game supports neither screen-sharing nor online multiplayer, it's best not to play it.
-- Pop in and pop out at will – no need to feel like you have to stay for the full two hours! This applies to both virtual and in-person participants.
- - Relatedly, feel free to continue working while attending – we love to have you there even if you can't participate in whatever games we're playing.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chappyhour.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chappyhour.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chappyhour.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/hybridcollab.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/hybridcollab.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/hybridcollab.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/hybridcollab.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,32 +0,0 @@
-# Make Hybrid Work.....work
-
-**TL;DR:** Recognize that coming into work does not necessarily imply coding. Rather, coming into socialize and collaborate in person is very important in itself.
-
-As we experiment and experience the realities of hybrid, many of you are noticing what research is also showing – namely that solid work relationships are very important, and that hybrid/remote work can have a negative effect on those relationships.
-
-Given this, we agree to **designate one day a week where everyone in Redmond agrees to make best effort to come into the office for the express purpose of collaborating and socializing with the rest of the team.**
-
-**Agreement Specifics:**
-- We will each do our reasonable best to be present each Thursday from 11am to 2pm for the express purpose of interacting with others, collaborating, chatting, and otherwise connecting - often informally - with our co-workers.
-- No coding is expected during this time. (of course, it's alright to code too...)
-- When on First Responder, taking care of our customers takes priority.
-- It is understood that some are remote, and we should continue to be intentionally inclusive for those as well wherever and however possible.
-
-**Principles:**
-1. Relationships are an important and necessary part of our work here at Microsoft and should be prioritized as such.
-2. Relationships are easier to maintain and build on when there's consistent face-to-face interactions where possible.
-3. Social/collaborative interactions with our co-workers should be prioritized as an important part of our job - e.g. work.
-
-**Rationale:**
-
-Research is showing more and more that social interactions are more of a "must have" than a "nice to have". In a recent write up from Microsoft Research ([Great Expectations: Making Hybrid Work Work](https://www.microsoft.com/en-us/worklab/work-trend-index/great-expectations-making-hybrid-work-work)), there are some interesting quotes in section 5 of the report.
-
-_One of the most felt aspects of remote and hybrid work is the impact it's had on our relationships. _[_Last year's Work Trend Index_](https://www.microsoft.com/en-us/worklab/work-trend-index/hybrid-work)_ revealed that teams became more siloed, and this year's study shows the trend one year later._
-
-_When people trust one another and have [social] capital, you get a willingness to take risks, you get more innovation and creativity and less groupthink - Nancy Baym, Principal Researcher, Microsoft Research_
-
-_When work-life balance is out of whack, most people cut out relationship-building for more urgent matters," says Constance Noonan Hadley, an organizational psychologist who studies workplace relationships. "Regardless of remote status, building relationships will still feel like a luxury workers cannot afford unless there is a shift in how time is prioritized and valued by managers_
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridcollab.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridcollab.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridcollab.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/hybridprinciples.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/hybridprinciples.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/hybridprinciples.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/hybridprinciples.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,38 +0,0 @@
-# Hybrid/Async Team Principles
-
-- Business goals are fully described, “done” is specified, and the path to completion articulated such that anyone from the working group can understand the purpose of the work with minimal clarification.
- - Epics are how we’re currently doing this.
-- The majority of communication done through “typed” (or formal) channels.
- - Each v-team has its own Teams channel – which has the entire dnceng team on it. (this has been working well so far)
- - Meeting notes and/or recordings are always taken and made widely available.
-- It’s easily knowable what each dev is working on, status, and next steps.
- - Stand ups are still an important team sync point. (For example, all work done is reflected on a team Kanban board)
- - Each dev may also set weekly expectations.
- - The specifics are more fully described in this dev guide.
-- Our team’s “dev process” (how we work) is written down as a living document.
- - How our team does what we do is codified in a "dev guide" (this set of md's in github) - which is constantly iterated on by devs themselves.
- - The goal is that this document is actually useful day-to-day for the devs. This is key to keeping the document truly “alive” and relevant.
- - As things change or are further clarified, the handbook is laid out well enough that it’s well known where to update it by anyone.
-- Async discussions are generally (but not always) prioritized over synchronous.
- - Devs are encouraged to question meetings by making sure there’s a clear agenda that would benefit by being conducted synchronously.
- - Synchronous meetings are still *extremely* important.
-- Intentional social interactions continue to be a top priority.
- - We're continuing “happy hour”
- - Periodic lunches and/or other events are scheduled every couple of months at a minimum
-- Everything starts with a document.
- - Proposal (Used to be meeting, but we need to move to a proposal first, then a meeting)
- - Team Process (Used to be “ask a manager”, but we need to move to update the handbook)
-- Each dev is a “manager of one”.
- - Each dev is largely autonomous, consistently unblock themselves, and make things better for themselves and the entire team.
-- Always iterating.
- - Work is done in the smallest chunks possible, then iterating. Velocity is more important than size of change.
- - This thinking helps devs not get blocked on others.
-- Intentional handoff
- - When handing work to someone, make sure they have context
- - Any investigation/existing work should be written down in the same place the handoff is happening to make the handoff productive
- - Be clear why the handoff is happening (avoid mentions/forwards without indicating expectations or reasoning)
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridprinciples.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridprinciples.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridprinciples.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/hybridtriage.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/hybridtriage.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/hybridtriage.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/hybridtriage.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,44 +0,0 @@
-# Hybrid Triage
-
-## Triage Logistics:
-* One 30 minute synchronous meeting per week at each locale. In this case that’s one for Redmond, and a separate one for Prague.
-* One triage driver per locale, although the driver could be shared or rotating. Triage driver responsibilities:
- * Running CLI tool twice a week. Once the morning of the day synchronous triage occurs, and once 4 days later.
- * Scheduling and running the weekly synchronous triage meeting, and keeping it on track and focused.
- * Making sure stale issues are taken care of.
-* It continues to be expected that issues are reviewed async (offline) by each senior dev at least once during the week. This should be made much easier using the new async tool – however, I’m well aware that there will be a strong propensity to just waiting until the weekly synchronous meeting. However, please remember the following:
- * Seniors are expected to do things they don't necessarily like or are hard to do.
- * 30 minutes is not enough time to get through all the issues if we don’t keep up during the week.
- * The weekly meeting should be reserved for discussions about challenging or interesting issues – which is one of the things most missed. If we spend our time triaging boring issues….we won't get that value.
- * The added visibility has been super important, and the easiest way to solve this is to review the majority of issues throughout the week.
-* Both locales should continue to review/triage all incoming issues. This is part of how we share context.
-
-## Scope:
-For now, we will limit triage to core-eng and arcade issues. Depending on how things go, we can discuss bringing other repos in later. (e.g. xharness etc)
-
-## Tools:
-* **Async Triage CLI** is a brand new tool that stuffs issue activity into the Async Triage Teams Channel.
-* We also use **ZenHub** to manage our issues and epics. Please let your lead know if you have problems with ZenHub licenses or want to know more how to use the tool. (for example one super nice feature is the ability to manage issues across multiple repos)
-
-## Policy:
-* Any dev can (and should) ask for further clarification as to what any issue is about from the creator. (any issue that does not get further clarification w/in 1 week is automatically closed)
-* Any dev can triage (assign the issue to an epic) if they feel confident in doing so.
-* All issues must be triaged within one week.
-* Any issues that are > 1 week old, must be dealt with at the weekly synchronous meeting.
-
-## What triage is: (refresher)
-* An issue is considered triaged when it's assigned to a business priority. (we call these Epics)
-* The V-Team assigned to an epic is responsible for triaging/prioritizing the issues within their Epic – which may include “kicking” issues back out for triage again – e.g. unassigning them from an epic.
-* Our current (now dated) documentation on Epics is here: https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/107/Sprint-Planning-and-Execution for a bit more background/rationale. Also, be sure and talk to your lead for further clarification, or you can reach out to me (Mark Wilkie) directly.
-
-## For a bit of history, a few things I think we’ve learned:
-* We tried async triage, and it does actually work (sorta/mostly) and would work even better with the new CLI tool.
-* Our teammates in Prague have appreciated the increased visibility (which is something we must continue doing).
-* Most every dev didn't like not meeting synchronously.
-* The cost of not having serendipitous conversations is significant – but hard to quantify.
-* Ironically, async triage seemed less inclusive because the participation has gone down, not up. (it’s been falling to a subset of folks to make sure things are triaged)
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridtriage.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridtriage.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Chybridtriage.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/IssueTrackingGuidance.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/IssueTrackingGuidance.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/IssueTrackingGuidance.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/IssueTrackingGuidance.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-# Adding a new issue
-- If you know which epic the issue is a part of, add the issue to the appropriate epic using the *Epic Issue* field. This corresponds to the business priority the issue contributes to. **If you don't know which epic the issue belongs to**, do not fill this out—it will be filed appropriately at triage.
-- Don't add it to a *Release* or *Project*.
-- Feel free to assign it. This will be considered a recommendation when it's triaged.
-- Feel free to assign a milestone if you think you might know. This will be considered a recommendation when it's triaged.
-- If the issue comes from a customer bug report, file it under the **First Responder** label. If the issue is specifically blocking a customer, assign it the **Critical** label as well.
-
-# Security issues
-- If the issue has potential security impact, it should *not* be filed on GitHub. Instead, file it under [AzDO (dnceng/internal)](https://dev.azure.com/dnceng/internal/_workitems/) and make sure to triage it against SDL's [Security Bug Bar](https://aka.ms/sdlbugbar).
-- Once filed, file a "tracking issue" on GitHub so that we can keep track of it through regular standup work. These issues should be titled something like "AzDO Issue #[Issue Number]" and contain a link to the AzDO issue.
-- All discussion of the issue should take place on AzDO or internal email, *not* on GitHub.
-
-# Why we're doing this at all
-- We'd like to lower the "pain" of entering and tracking our work. We've got consistent feedback that GitHub issues are much easier to use than AzDO - although not as feature-rich. However, we need everyone's participation to make this work...
-- There's general consensus that there's better visibility and that things will be generally more accessible.
-
-# Process Notes
-- All issues (regardless of type) will be triaged on a regular basis.
-- Triage will "fill out" the issue as needed so that it's actionable.
-- If triage assigns an issue to a sprint (milestone), this is a recommendation only. The actual assignments are done during sprint planning.
-- During sprint planning, all issues assigned to the sprint being planned will be considered first, and then the backlog considered after that. This means that if you think something is important, please assign it to the sprint you'd like to see it get done in as a recommendation. (then follow up in person too of course as appropriate)
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5CIssueTrackingGuidance.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5CIssueTrackingGuidance.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5CIssueTrackingGuidance.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/meetings.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/meetings.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/meetings.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/meetings.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-# Meetings
-
-## Introduction
-In this new hybrid world there are many challenges ahead. This is our current attempt as a team to navigate. We know we're wrong at least in some ways, but will bravely move forward, learn, and then adjust as appropriate.
-
-## Definition
-To distinguish team gatherings that fall into the guidelines below from others, we should define what we mean by a meeting.
-- A **Meeting** - for the context of our principles and guidelines below - is any gathering of the team where a planned discussion or decision is made that will be long-lived (more than a week).
-- Team social events (such as the weekly Prague Coffee Break or the Redmond Happy Hour), Stand Up (the daily one in Prague, the twice weekly one in Redmond, and the daily FR one), and 1:1 do not meet our definition of a meeting.
-
-## Principles
-- All of our meetings must be inclusive of everyone who wants to participate.
-- Meetings should be reserved for situations and discussions that are unreasonable or silly to have async.
-- Meetings are an important, necessary, and key part of belonging.
-- Social interactions are also a priority. (not just the agenda)
-- Nobody should feel like they *only* go to meetings all day
-
-## Guidelines
-- Every meeting is a Teams meeting. (regardless if synchronous or async)
-- Meetings should start after the hour (or half hour) so that folks have breathing space in between. At least five minutes are recommended, and ten minutes if reasonable.
-- Every meeting has recording turned on by default
-- A meeting agenda articulating what is hoped to be accomplished is highly encouraged.
-- Notes that articulate the main points discussed and conclusion are sent out after every meeting that is of general concern. (1:1's don't count of course)
-- When folks are meeting physically together, laptops (or other devices) should be used to aid remote folks. (see logistics note below)
-- The meeting organizer should *always* have a laptop.
-- Attendees (remote and in-person) should use the "hand raise" feature to request time to speak. The meeting organizer, or her designee, should monitor and use this list.
-- The meeting organizer, or her designee, should monitor Teams chat for relevant meeting information.
-- It's probably better to schedule meetings in a conference room and/or focus room so as to not disrupt the team room. However, there are undoubtedly times when having the meeting in the team room is appropriate.
-- Only one person should speak at a time
- - Modern conference room microphones are sensitive. Even quiet side conversations come through clearly to remote attendees.
- - Simultaneous speakers in a conference room quickly become incoherent to remote attendees and make it difficult to understand individuals.
- - Use the Teams meeting chat for side conversations or request the floor from the meeting organizer.
-- Close meetings on time. Conference rooms may be booked by others, which can cause an abrupt ending (and remote folks do not get to participate in the wrap-up hallway conversations that can happen after).
-
-## Logistics
-- A pool of laptops is available which can be 'checked out' and used for meetings. At this time, there's probably not enough for everyone, but we do have a few already. Also, most devs now already have a laptop which is helpful.
-- Meeting notes:
- - V-Team meeting notes are posted in the v-team channel in Teams.
- - Inter-Team notes are sent to the invite list.
- - General team meeting notes are sent to the whole team alias.
-
-## Possible Feature Requests
-- Teams to add the ability to auto deep link portions of the transcript
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cmeetings.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cmeetings.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cmeetings.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/readme.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/readme.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/readme.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/readme.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,50 +0,0 @@
-# Team Guide
-
-## Introduction
-The intent of this document is to describe (not prescribe) how we do things within our team. Every team has their norms, and we're no exception. Given the shift to hybrid, coming up with our new norms is critical. This document hopes to reflect and articulate what we've learned that works.
-
-This is a living document, meant to be updated as we learn and adjust. That is to say, this document should say *something* about most everything we do - and then we change it as we see fit.
-
-## Team Guide Principles
-- Every person on the team is encouraged to submit a PR to this doc as they see things that could/should be improved.
-- It's more important to try *something* than to find the perfect solution.
-- Experiment, learn, adjust and try again.
-
-## Dev Guide Discussion Guidelines
-As we try things, find they do/don't work, there's going to be a lot of discussion - with a wide variety of views. This is good. As we discuss and wrestle with these challenges before us, let's keep the following in mind:
-- Pointing out that something simply won't work is only helpful when there's also a proposal as we're looking for our best guess to try.
-- Expressing concerns is different than stating that something just won't work. We should all express our concerns.
-- Making an extra effort to point out parts that *should* work is very helpful. Not only does this improve how we all feel about things as a whole, but it serves as a reminder of what not to get rid of.
-- Keep in mind that nobody actually knows how this will work. We're all figuring this out together.
-- As always, be kind.
-- It's very important to capture data such that we're able to better explain our learnings. This will help also with any infrastructure investments we might need to make.
-- We will discuss the current state of the hybrid/async process during our monthly retrospectives to determine what is working, what isn't working, and how we can improve our process.
-
-## What Success Looks Like
-Let's not forget the whole point of why we're trying to write down how we think we should best work as a team.
-- Each team person feels like they belong, and are able to do their best work.
-- Team impact and throughput remains high (or even increases).
-- Where the dev lives, or what time of day they’re working, has limited effect on their own personal and overall team effectiveness.
-- Working groups (v-teams) span the globe, yet are effective.
-- Devs are often able to arrange their day and work when they are most productive and best for their situation.
-- Devs are rarely blocked by others in making forward progress.
-- Team diversity continues to increase.
-- Team morale and satisfaction are high.
-- Team responsiveness continues to be high and within the SLA.
-
-## Table of Contents
-This is a work-in-progress and is far from complete. The idea is to start with the trickiest areas we think we'll encounter after Stage 6 re-opening.
-
-- [Hybrid Team Principles](hybridprinciples.md)
-- [Making Hybrid Work....Work](hybridcollab.md)
-- [General Availability Guidance](availability.md)
-- [General Meetings](meetings.md)
-- Criteria for when to have synchronous meetings
-- [Hybrid Triage](hybridtriage.md)
-- [Stand Ups](standup.md)
-- [Documentation Guidelines](documentationguidelines.md)
-- [Happy Hour](happyhour.md)
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Creadme.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Creadme.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Creadme.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/standup.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/standup.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/DevGuide/standup.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/DevGuide/standup.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,19 +0,0 @@
-# Standup Guidelines
-
-* Standups should be both in-person and Teams meetings
-* Standups occur twice a week on Tuesdays and Thursdays
-* All team members are expected to attend standup, except when there are conflicts
-* Standups can and will be preempted by meetings like all hands, Q&As, other org/division-wide meetings, and holidays.
-* The in person standup will take place in the team room, and the person running the meeting should share their screen on the television in the team room.
-* Where possible, an external mic should be used for the standup meeting so that those joining by Teams can hear everyone at the in person meeting.
-* Standup will use the [Standup View](https://github.com/orgs/dotnet/projects/86/views/10) of the .NET Core Engineering Services board.
- * The board should be organized such that it is grouped by the assignee, with issues in the Backlog (no status) or Done columns not shown.
- * FR issues are also not shown on the Standup view
- * All team members are responsible for updating their issues to make sure they are assigned to them and the status of the issues set
-* All team members are encouraged to stand during standup, but sitting is ok as well.
-* Standup should focus on quick status updates, with implementation details handled offline where possible.
-* While off topic conversations are fun and a good part of our culture, they should be kept to the end of the meeting so that people who need to leave can leave, without feeling like they may miss out on important business
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cstandup.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cstandup.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CDevGuide%5Cstandup.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/artifact-maintenance-core-eng-14605.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/artifact-maintenance-core-eng-14605.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/artifact-maintenance-core-eng-14605.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/artifact-maintenance-core-eng-14605.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,77 +0,0 @@
-# dotnet-helix-machines Artifact Maintenance
-
-Epic: https://github.com/dotnet/core-eng/issues/14605
-
-The core of this issue is that we don't have a process for updating our helix-machines artifacts on a regular cadence of any kind.
-The purpose of this document is to lay out such a process for comment and ridicule.
-
-## Goal
-We want to create a process that a team of vendors can execute on a regular cadence to keep the artifacts we install on our machines up to date.
-No automation is planned as part of this process.
-
-There are two primary reasons for this work:
-1. Security – keeping old artifacts can lead to potential vulnerabilities as minor version updates frequently contain security updates.
-2. Stability – minor version updates often contain simple bugfixes that may require patching.
-
-## Stakeholders
-Primarily the .NET Engineering Services team, but ultimately all customers using dotnet-helix-machines since not updating artifacts on a regular
-cadence can lead to security issues, and updating them to versions that have breaking changes can obviously cause issues for them as well.
-
-## Problems
-The following problems have been copied directly from the epic for reference.
-
-* For some artifacts, we don't really know what version we get because we're asking a given source (Windows, package manager, etc) for the name directly; this is thus not logged at all.
-* Given we copy many installers and packages to our own storage accounts, and these packages may require manual intervention, this copying is sometimes difficult to automate.
-* Even when we do know there is a newer version, we often cannot just take major release updates to a component as this would be breaking/require a hefty re-write of the code using this dependency.
-* When we can tolerate them, we need a strategy to deal with major version updates, involving making sure a validation plan is in place, and ensuring a process to communicate to product teams to let them know
-
-### Necessary Ongoing Tasks
-While automation can be improved to make this better, these sorts of things need to happen on a regular cadence:
-
-* Check for the latest version available (will vary by OS) and compare what version we have.
-* Proactively try to adopt newer versions of dependencies before required
-* Teach helix machines to have more info about artifacts (such as "where do we get this one?", "reasons to avoid major version upgrades", simple update instructions, etc.). This could be as simple as a
-required text file in the artifact directory.
-
-## Proposed Implementation
-I think that we should start treating artifacts in a way similar to how we now treat secrets. At a high level, this means:
-
-* We should include metadata on updating them – every artifact should have an `README.md` document included alongside it (similar to the validation
-scripts we require today). Artifacts missing these will fail the build. This document will include detailed information on updating the artifact (e.g. where
-the artifact comes from, how to tell if there is an updated version, the major version to pull from, where to upload the artifact, what part of the YAML to update accordingly, etc.).
-* The operations vendor team should have a scheduled monthly pass of all artifacts and follow the instructions laid out in the README.md.
-* We should add as many assets to Component Governance as possible so that we receive updates on CVEs so we can do an immediate update. This could possibly be done via
-an automatically generated cgmanifest.json.
-
-### netcorenativeassets
-Currently, most of our artifacts are stored in the netcorenativeassets storage account and downloaded from there. This is a good model from a security
-perspective; however, we currently only update those artifacts on-demand. For these artifacts, the vendor will need to check the canonical
-source for these artifacts (e.g. python.org for python) for the most recent version of the artifact, upload it to netcorenativeassets, and then create
-a PR to dotnet-helix-machines with the update version. **This will only be done once newly released artifacts are mature unless there is a CVE in our
-currently-used version**. "Mature" in this case means the artifact has been released for over a month.
-
-### Package managers
-
-The north star here is to eventually pull everything from private Azure DevOps feeds.
-
-#### Pip
-Pip should use internal feeds which upstream to public for now. Eventually, we should migrate to Terrapin. Once we're using Terrapin, we should stop version
-pinning as using latest from our private feed will be more than adequate.
-
-#### Linux packages
-Currently, the accepted model for Linux packages is to use the standard package manager's repositories. Because we do not ask for specific versions
-of these packages, we make the (possibly dangerous) assumption that they are kept up-to-date. Presumably, these may eventually be moved to
-internally-controlled package repositories.
-
-### Handling Major Version Updates
-As mentioned in the problems section, major version updates are a sticky problem to solve as they potentially have breaking changes for our customers
-and thus require manual review. At end-of-life for a major version, we should force an upgrade to a more recent version of the artifact and remove it
-from our machines. Otherwise, on requests for major versions, we can deploy them side-by-side with previous versions to reduce pain.
-
-## The Work
-In addition to further documenting the process for maintaining artifacts, the primary work for this epic will actually be writing update
-docs for all of our currently-existing artifacts.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cartifact-maintenance-core-eng-14605.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cartifact-maintenance-core-eng-14605.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cartifact-maintenance-core-eng-14605.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/async-triage-core-eng13288.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/async-triage-core-eng13288.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/async-triage-core-eng13288.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/async-triage-core-eng13288.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,62 +0,0 @@
-# Asynchronous Triage Bot
-
-We want to implement a mechanism that will allow newly-created issues on GitHub that still need to be triaged to automatically have their own dedicated conversation started on Teams. It would also be useful to denote Teams conversations as [Triaged] or [Needs Triage] in Teams for clarity.
-
-See Epic for more context: https://github.com/dotnet/core-eng/issues/13288
-
-## Stakeholders
- - .NET Engineering Services team
-
-## Risk
-
-### Unknowns and Open Questions
-- How can we distinguish if an issue needs triage, or if it was just created and will immediately have an Epic assigned to it? ([link to issue](https://github.com/dotnet/core-eng/issues/13457))
- - Suggested - a background job that occasionally scans for unassigned issues and adds a conversation on Teams if necessary, rather than generating a conversation every time an issue is created. This also addresses an interesting case in which an issue was assigned to one Epic, then unassigned.
-- Some issues may not need to be assigned to an Epic (ex. customers that open issues for themselves).
- - Suggested - marking these issues with a label for the bot to ignore, or triage assigns them to the "work tracking for other teams" epic.
-
-### Proof of Concepts
-- Verify that we are able to open a new conversation in a channel on Teams when a new issue is opened on GitHub. Try to utilize some similar functionality to the FR Mention Bot.
-- Verify that additional comments added to an issue update their dedicated Teams conversation.
-- Verify that existing Teams conversations can be updated when triaged, such as denoting [Triaged] in the conversation title.
-- Verify that we can grab all the existing unassigned issues from GitHub (this will require the use of the ZenHub API to check if it is in an Epic).
-
-### Dependencies
-- GitHub API
-- ZenHub API
-- Teams
-
-Additionally, the solution should mostly be a new addition to the Arcade Services repo. Note that most of the GitHub webhook functionality is already implemented in Arcade Services. The goal to have the work completed by is September 10th at the latest.
-
-## Serviceability
-
-### Testing
-To avoid sending actual requests to Teams and cluttering up the channel, we can test some functionality of the code by creating mock HTTP calls and checking if they send requests in the right cases. We can also try creating a hidden channel on Teams for integration testing.
-
-### Security
-Identifying secrets that will be used include the Teams channel connector URI and authentication for the GitHub app. Note that all PII is owned by GitHub and Teams.
-- The "bot" account that will be posting to Teams will also require authentication and must have permission to access channel messages, add replies to a discussion, etc.
-- Setting up repro/test/dev environments: this will vary based on implementation details; the environment may simply be able to be set up and opened as a VS project, or might require a more complicated process.
-
-### Rollout and Deployment
-- The solution will most likely be deployed on Arcade Services, and thus on the Arcade Services cadence. Once deployed, check the Async Triage channel on Teams for updates.
-- While nothing is being deprecated, we will probably remove the "Needs Triage" and "I Think This is Triaged" Fabric bot tags once the solution is deployed.
-- What are the risks during deployment?
- - If the solution stops working, new untriaged issues may go unnoticed without a conversation being created on Teams. It may require some monitoring to make sure that untriaged issues are consistently being added on Teams.
-
-## Usage Telemetry
-Suggestions for tracking "usefulness" of the solution include measuring the average amount of time it takes for an issue to be triaged, how many people are involved, the average length of conversations, etc. However, more discussion is needed to figure out how the necessary information will be collected.
-
-## Monitoring
-- Is there existing monitoring that will be used by this epic?
- - On the Teams end, we can monitor if there are an abnormal amount of non-successful requests made to Teams.
- - Suggested - using Azure Application Insights to monitor the functionality of the GitHub controller.
-- If new monitoring is needed, it should be defined and alerting thresholds should be set up.
-
-## FR Hand off
-- We should create some documentation on what functionalities the solution currently possesses and link to the code for more information. The code should also be clearly documented.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Casync-triage-core-eng13288.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Casync-triage-core-eng13288.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Casync-triage-core-eng13288.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/automated-image-generation13997.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/automated-image-generation13997.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/automated-image-generation13997.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/automated-image-generation13997.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,162 +0,0 @@
-# Automated Image Generation
-
-[Link to Automated Image Generation Epic](https://github.com/dotnet/core-eng/issues/13997)
-
-## Summary
-
-### Motivation
-Currently the only way to generate Helix custom images for our Windows queues is to ask DDFUN. This happens on a weekly basis, whenever we want to update Visual Studio versions or include the newest Windows patches. This process has several drawbacks, from allocating someone from the DDFUN team to the introduction of typos through manual steps.
-
-
-### Goal
-The goal of this epic is to automate the process of custom image generation, so this can be done by our team or vendors, without the need for external teams. These are images which contain various versions of Visual Studio and various versions of Windows. We will introduce a new automated and monitored process which removes the need for manual steps done by DDFUN.
-
-When this is completed we will be able to:
-* generate images ourselves without spending time on coordination with DDFUN, similarly to other teams using Image Factory
-* specify new versions at one place instead of tens of configuration files
-* automatically regenerate images on change of configuration files
-* monitor completion of process instead of relying on notifications from DDFUN
-
-Part of this epic is to take the ownership of [custom image definitions](https://devdiv.visualstudio.com/XlabImageFactory/_git/ImageConfigurations?path=%2FMonthly%2FHelixBaseImages) that currently reside with DDFUN to get complete control over the definitions and to be able to run our automation against them seamlessly. This will be part of new documentation so maintenance of these definitions can be done also by vendors.
-
-### Implementation Details
-
-Let's start with a scenario where we need to update Visual Studio 2019 Preview version per our [schedule](https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/107/VS2019-Upgrade-Schedule) which has to be done almost every week.
-
-Currently this change requires update of same values in six [image definitions](https://devdiv.visualstudio.com/XlabImageFactory/_git/ImageConfigurations?path=/Monthly/HelixBaseImages/VS2019Preview). Specifically:
-* update artifact windows-vs-willowreleased, set parameter VSBootstrapperURL to a new value.
-* update version in parameter CustomImageName under Destination.
-
-To simplify this, we will introduce templating, so variables are defined at exactly one place and are not duplicated across multiple files, similarly to what we have in OSOB.
-
-#### Example:
-Instead of hardcoding the same version and the same URL at [six places](https://devdiv.visualstudio.com/XlabImageFactory/_git/ImageConfigurations?path=/Monthly/HelixBaseImages/VS2019Preview), we introduce template variable {VS_2019_PREVIEW_URL} which declares an URL to Visual Studio artifact and the template variable {VS_2019_PREVIEW_VERSION} which declares version of Visual Studio.
-
-All templated variables will be stored in one file.
-
-
-1. Given our example scenario, we will update variables VS_2019_PREVIEW_URL and VS_2019_PREVIEW_VERSION. And this change will be pushed into the repository. This change triggers our new build pipeline under Azure DevOps.
-
-2. The pipeline executes command line tool which processes all payloads and substitutes all template variables from the variable file and call Image Factory with this payload. To prevent duplication we will calculate hash of payload and store it in a simple Azure storage table and call Image Factory only for payloads which haven't been processed yet. If needed it will be possible to force rebuilding of images.
-
-3. The pipeline won't block until all images are completed. It will post Image Factory jobs, store tracking ids in Azure storage table mentioned above and finish.
-
-4. To get results, the same pipeline will be executed periodically (e.g. every hour) and check states of all pending Image Factory jobs and provide summary report so it's clear if all images that were requested are ready. In case any image build fails, the pipeline will fail and FR will be notified by email.
-
-5. Once Helix custom images are generated, FR has to create an OSOB PR with updated image names. Existing OSOB post validation performs version test of Visual Studio.
-
-Documentation of the Image Factory API can be found [here](https://devdiv.visualstudio.com/XlabImageFactory/_wiki/wikis/XlabImageFactory.wiki/6330/AccessingImageFactory).
-
-
-## Take ownership of Helix custom image definitions
-
-Making custom image definitions templated requires modifications. This is why we need to move all [definitions](https://devdiv.visualstudio.com/XlabImageFactory/_git/ImageConfigurations?path=%2FMonthly%2FHelixBaseImages) under our repository. It was confirmed by DDFUN (Casey) that these definitions aren't shared with any other team. Beside changing URL and version the structure is left unchanged, so there isn't any additional maintenance cost related to owning these definitions.
-
-Currently the definitions with DDFUN use YAML only to be converted to JSON payloads. As part of the move I would suggest to start using JSON file format as it's expected input of the Image Factory. The only benefit of YAML is comments, but in our case these comments are copy pasted across all definitions and don't add any additional value.
-
-Here is an example of a fragment of a templated image definition with template variables VS_2019_PREVIEW_URL and VS_2019_PREVIEW_VERSION:
-```
-{
- "Artifacts": [
- {
- "Name": "windows-vs-willowreleased",
- "Parameters": {
- "WorkLoads": "reduced",
- "Sku": "Enterprise",
- "VSBootstrapperURL": "{VS_2019_PREVIEW_URL}/vs_Enterprise.exe"
- }
- }
- ],
- "Destination": [
- {
- "StorageAccountName": "heliximgfctdncwus2",
- "CustomImageName": "Helix-Server-DataCenter-19H1-ES-ES-VS2019-Preview-Enterprise_{VS_2019_PREVIEW_VERSION}",
- "SubId": "84a65c9a-787d-45da-b10a-3a1cefce8060"
- }
- ]
-}
-```
-
-_Note: Nested templated variables won't be supported._
-
-Variables file:
-```
-{
- "VS_2019_PREVIEW_URL":"https://aka.ms/vs/16/pre/133508311_-1151188015",
- "VS_2019_PREVIEW_VERSION":"16_11_2_1",
-}
-```
-
-
-## Stakeholders
-
-- .NET Engineering Services
-
-## Risk
-
-- Are there any POCs (proof of concepts) required to be built for this work?
-
- POC for Visual Studio images was done before creating this one pager.
-
-- What dependencies will this epic have? Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
-
- This epic depends on external service Image Factory. Its functionality is currently stable and no breaking changes are planned. I asked to notify our team in case of changes.
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-
- Implementation of this shouldn't cause breaking changes as all issues with images should be detected during OSOB validation phase.
-
-
-## Serviceability
-
-- How will the components that make up this epic be tested?
-
- CLI will be tested by unit tests. There will be also scenario test which generates one image and verifies it.
-
-- Identifying secrets (e.g. PATs, certificates, et cetera) that will be used (new ones to be created; existing ones to be used).
-
- Existing secrets:
- * image-factory-tenant-id
- * image-factory-client-id
- * image-factory-client-secret
- * image-factory-resource-id
-
- New secret:
- * image-factory-state-connection-string
-
-- Does this change any existing SDL threat or data privacy models? (models can be found
-in [core-eng/SDL](https://github.com/dotnet/core-eng/SDL) folder)
-
- It doesn't change SDL threat or data privacy models.
-
-
-## Rollout and Deployment
-
-- A new Azure DevOps build pipeline will take specific version of this tool from artifacts and executes it.
-
-- In case of issues, it is still possible to generate images manually.
-
-
-## Usage Telemetry
-- This tool is internal only. Basic information about runs will be available from the pipeline history. We don't plan to include any additional data, unless they are requested. If we start experiencing problems with the image generation though, we might need to start gathering some reliability data.
-
-## Monitoring
-- Monitoring is based on result of the build pipeline. It will send email notification on any failure. If there is any issue, it should be picked up by FR.
-
-## FR Hand off
-- We will create documentation about
- - How to generate custom images with Visual Studio
- - How to use the tool in manual mode
- - What to do when pipeline fails the build
-
-- The policies of generating images overlaps with Matrix of Truth epic and will be further discussed with the epic owner.
-
-## Future outlook
-- Execution of the process should be possible to be done by vendors with minimal cost.
-
-
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cautomated-image-generation13997.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cautomated-image-generation13997.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cautomated-image-generation13997.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/bcm-emergency-staging-pipeline-optimizations-12261.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/bcm-emergency-staging-pipeline-optimizations-12261.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/bcm-emergency-staging-pipeline-optimizations-12261.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/bcm-emergency-staging-pipeline-optimizations-12261.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,124 +0,0 @@
-# Emergency staging pipeline optimizations
-
-## Goals
-
-As part of https://github.com/dotnet/arcade/issues/12261 we need to introduce emergency mode to Stage-DotNet and Stage-DotNet-Test pipelines that can be run to prepare a .NET release as fast as possible. Currently the critical stages in the pipeline depend on multiple validation stages (Source Code Validation, Validation, Required Validation) that run for a long time, which is one of the causes for the overall long run time of the pipeline. We need to implement a sped up version of the pipeline, skipping some of the validation jobs and running stages in parallel where possible.
-
-## Proposed Implementation
-
-The proposed implementation is introducing an "emergency switch" parameter in the Stage-DotNet pipeline. Stage dependencies can be rearranged according to this parameter. That will require no changes to the release pipeline that depends on the staging pipeline outputs. (Stage-DotNet is coded as a resource in Release-DotNet-*). We can test the changes using the available testing pipeline (Stage-DotNet-Test) as it uses the same yaml.
-
-Another approach was discussed - creating a separate pipeline that will contain the stages in the emergency order. This approach would require changes to the release pipeline and would be harder to maintain as changes to Stage-DotNet would also need to be mirrored to the new pipeline and that can cause divergence from the original in the future. Therefore, the "emergency switch" parameter is the better option.
-
-The sequence of stages in the current pipeline is:
-
-```mermaid
-flowchart LR
- prep["Prep Ring \n ~30min"] --> prep_override[Prep Ring Override]
- prep_override --> signing["Signing Ring \n ~50min"]
- prep_override --> source_code_validation["Source Code Validation \n ~40min"]
- source_code_validation --> source_code_validation_override[Source Code Validation Override]
- signing --> required_validation["Required Validation \n ~1h"]
- signing --> validation["Validation \n ~5h"]
- required_validation --> sbom_generation["SBOM Generation \n ~20m"]
- required_validation --> required_validation_override[Required Validation Override]
- validation --> validation_override[Validation Override]
- required_validation_override --> publishing_v3_signed["Publish Post-Signing Assets \n ~1h20m"]
- required_validation_override --> post_signing_publishing["Publish Signed Assets \n ~1h30m"]
- required_validation_override --> vs_insertion["VS Insertion Ring \n ~50m"]
- sbom_generation --> sbom_generation_override[SBOM Generation Override]
- vs_insertion --> vs_insertion_override[VS Insertion Override]
- vs_insertion_override --> cti_sign_off[Wait for Test Team Sign Off]
- cti_sign_off --> staging["Staging Ring \n ~1h10m"]
- source_code_validation_override --> staging
- staging --> finalize_sign_off[Sign off for finalizing the release]
- finalize_sign_off --> finalize_staging[Finalize Staging Ring]
- finalize_sign_off --> publishing_v3_validated["Publish CTI Validated Assets \n ~1h20m"]
- finalize_staging --> handoff_sign_off_dotnetcsainternal[Approve Publishing to Dotnetcsainternal]
- handoff_sign_off_dotnetcsainternal --> handoff_publishing_dotnetcsainternal["Handoff Publishing Ring (dotnetcsainternal)"]
- classDef default fill:#50C878, stroke:#023020;
- classDef Override fill:#ECFFDC, stroke:#023020;
- class prep_override,source_code_validation_override,required_validation_override,sbom_generation_override,vs_insertion_override,validation_override Override;
-```
-*Light colored stages are manual validation that overrides another stage (e.g. Prep - Prep Override). They are run if the corresponding stage fails or succeeds with issues and skipped otherwise.
-
-If the parameter is set to true, change the sequence of stages/jobs in the following way:
-
-```mermaid
-flowchart LR
- prep["Prep Ring \n ~30min"] --> signing["Signing Ring \n ~50min"]
- prep ---> source_code_validation["Source Code Validation \n ~40min"]
- signing --> vs_insertion["VS Insertion Ring \n ~50m"]
- signing --> required_validation["Required Validation \n ~1h"]
- signing --> validation["Validation \n ~5h"]
- signing --> sbom_generation["SBOM Generation \n ~20m"]
- signing --> publishing_v3_signed["Publish Post-Signing Assets \n ~1h20m"]
- signing --> post_signing_publishing["Publish Signed Assets \n ~1h30m"]
- signing --> cti_sign_off[Wait for Test Team Sign Off]
- cti_sign_off --> staging["Staging Ring \n ~1h10m"]
- staging --> finalize_sign_off[Sign off for finalizing the release]
- finalize_sign_off --> finalize_staging[Finalize Staging Ring]
- finalize_sign_off --> publishing_v3_validated["Publish CTI Validated Assets \n ~1h20m"]
- classDef default fill:#50C878, stroke:#023020;
-```
-
-- The Staging Ring
- - skip the `validate-staging-inputs` and `validate_staging_outputs` step
- - dynamically change dependencies to
- - prep
- - prep_override
- - signing
- - cti_sign_off
- - dynamically change conditions
-- VS Insertion Ring
- - skip `validate_vs_insertion_inputs` job
- - dynamically change dependencies to
- - prep
- - prep_override
-- Publish Signed Assets
- - depends on:
- - signing
- - add a condition that signing was successful
-- Publish Post-Signing Assets
- - depends on:
- - prep
- - prep_override
- - signing
- - remove required_validation conditions
-- Validation Ring - no changes
-- Sbom Generation Ring
- - depends on:
- - prep
- - prep_override
- - signing
- - no changes in conditions
-- Required Validation Ring - no changes
-- Source Code Validation - no changes
-- Signing Ring
- - skip `validate_signing_inputs` step
-- Prep Ring - no changes
-- Configure stages to have no override on failure
-
-## Risks
-
-- the release pipeline is closely connected to the staging - Stage-DotNet is listed as a resource in Release-DotNet from which build artifacts are being downloaded, the release pipeline also expects that assets are published to certain feeds and storage accounts. We need to check that all assets it needs are being published with the expected changes and in the expected location.
-
-- make sure that the critical stages of Stage-DotNet have all the needed assets. It is possible that additional changes need to be made in the jobs consuming or publishing artifacts, but on a first glance validation stages don't produce outputs that are used in later stages.
-
-## Testing
-
-The change can be tested by running the Stage-DotNet-Test pipeline. It uses the same yml as Stage-DotNet but uploads assets to temporarily created feeds and blob storage containers so that we can run the pipeline multiple times without uploading to the official places. We should run it with the emergency switch on and off and make sure that
-- stages are run in the correct order in both emergency and regular scenario
-- adding the new parameter doesn't introduce any errors in the current staging process
-- all stages from the diagram are run so that:
- - all files produced by the staging pipeline are published (to feeds and blob storage containers) in both the emergency and regular scenario
- - the build artifacts that are used by the release pipeline are published (`config.json`, `manifest.json`, `signed/*`, `signalr-signed/*`, `release-manifests/*`)
-
-## Open Questions
-
-1. How does the change need to be communicated?
-
-2. Should we create a separate 1ES pool for the emergency pipeline to decrease wait time, also could we unite jobs with the same goal? (suggested by Djuradj)
-
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/core-eng-repository-migration-15084.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/core-eng-repository-migration-15084.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/core-eng-repository-migration-15084.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/core-eng-repository-migration-15084.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,90 +0,0 @@
-# Core-Eng Repository Migration
-
-It is discouraged to have repositories without code. Our documentation and issues are fragmented between multiple places, and it's hard to orient in it. To make things cleaner, it makes sense to migrate all valid items from core-eng repository and archive this repository.
-
-
-## Stakeholders
-
-* .NET Core Engineering Services team (contact: @dnceng)
-* .NET Core Engineering Partners (contact:@dncpartners)
-
-
-## Components to change
-
-This work consists of four parts:
-* Move repository files (documentation only)
-* Move Wiki
-* Code changes
-* Issues / Epics
-* Guidance for security issues
-
-### Move repository files (documentation only)
-This change includes:
- * [DevDocumentation](https://github.com/dotnet/core-eng/tree/main/DevDocumentation/DevWorkflow/Design) - contains new documentation for DevWorkflow. The whole folder should be moved.
- * [Docs](https://github.com/dotnet/core-eng/tree/main/docs) - some items seems to be obsolete, but there are recent (<=1 year) updates.
- * [Documentation](https://github.com/dotnet/core-eng/tree/main/Documentation) - the whole folder should be moved.
-
-For each document we need to find the best location. Internal documentation shouldn't go to the Arcade repository which is available for public. List of target locations:
- * [arcade documentation](https://github.com/dotnet/arcade/tree/main/Documentation).
- * [arcade-services documentation](https://github.com/dotnet/arcade-services/tree/main/docs).
- * [helix-service documentation](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-service?path=/docs)
- * [helix-machines documentation](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-machines).
- * [AzDO wiki](https://dev.azure.com/dnceng/internal/_wiki/wikis/internal.wiki/1/Home)
-
-Note: While moving to a new location, we need to update links in the existing code. The rest of the [repository core-eng](https://github.com/dotnet/core-eng) seems to be safe to be deleted.
-
-### Move Wiki
-
-The whole wiki should be migrated. There are two options for new placement:
-* [AzDO dnceng wiki](https://dev.azure.com/dnceng/internal/_wiki/wikis/internal.wiki/1/Home) - visible internally only
-* [Arcade wiki](https://github.com/dotnet/arcade/wiki) - available to everyone
-
-If we split content between public and internal location, fragmentation of our documentation will increase. This is the reason why we are planning to move the whole wiki into AzDO.
-
-### Code changes
-
-This is to change configuration to point against a different repository and to update ZenHub logic to a new representation of epic.
-
-Affected components:
-* Alerting under dotnet-arcade-services\src\DotNet.Status.Web
-* RolloutScorer under dotnet-arcade-services\src\RolloutScorer
-* Maestro under dotnet-arcade-services\src\Maestro
-* Async triage tool under dotnet-helix-service\src\async-triage-cli
-
-### Issues / Epics
-
-There are many issues created a long time ago which probably aren't valid anymore. We should be automatically migrating issues that are newer than a year. Epics could be older and should be automatically migrated if they contain at least one issue that is newer than a year.
-
-We should flag issues with the Security or EOC labels as not to be automatically migrated. We don't want to announce security issues to the public before they are fixed.
-
-Note: No issues will be closed automatically. The core-eng repository will be archived so if required, any issue can be moved manually in the future.
-
-### Guidance for security issues
-
-All issues with the Security or EOC labels should not be automatically migrated. Security issues must not be announced to the public before they are fixed. We need to come up with new guidance for security issues. One option would be to track them as AzDO work items.
-
-
-## Migration approach
-
-No item will be deleted as part of this change. Once all valid items are copied into a new location, the repository core-eng will be archived.
-
-## Rollout and Deployment
-
-* Affected components which point against the core-eng repo have to be updated and deployed. See the list above.
-
-## Communication of changes
-
-FR has to be notified about:
-* new location of documentation
-* new location of alerts
-
-Partners have to be notified about:
-* new location of documentation
-
-## Monitoring
-
-All affected components have to be verified after deployment. No additional monitoring is required.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Ccore-eng-repository-migration-15084.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Ccore-eng-repository-migration-15084.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Ccore-eng-repository-migration-15084.md)
-
Binary files /tmp/tmpzei_s1f4/vtUhdeu1_U/dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/EpicDropDown.png and /tmp/tmpzei_s1f4/QIlsu0xnyE/dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/EpicDropDown.png differ
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/improved-container-image-lifecycle-arcade-10349.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/improved-container-image-lifecycle-arcade-10349.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/improved-container-image-lifecycle-arcade-10349.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/improved-container-image-lifecycle-arcade-10349.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,52 +0,0 @@
-# Improved Docker Container Image Lifecycle
-
-As part of [#10123](https://github.com/dotnet/arcade/issues/10123) to improve our docker container security and sustainability, we need to improve the container image lifecycle. Currently, our container definitions are stable, but rarely updated, with some of the definitions dating back several years. We don't have means to ensure that all container images we use contain the latest OS patches and CVE fixes. One of the main points of this proposal is to ensure that the containers are updated regularly, accepting servicing updates from the OS on a regular basis. The major business goals of this work are to make sure that:
-
-- Our container images are re-built regularly and they contain the latest underlying OS patches and CVE fixes
-- There is a mechanism for updating the docker containers used by product teams so that they are always on the latest version of each container image
-- There is a process and tools implemented for identifying and removing dockerfiles based on out-of-support base images
-- There is a process and tools to delete out-of-date container images (older than 3-6 months) from MCR
-- All images used in the building and testing of .NET use Microsoft-approved base images, either Mariner where appropriate, or [Microsoft Artifact Registry-approved images](https://eng.ms/docs/more/containers-secure-supply-chain/approved-images) where Mariner is insufficient
-
-## Stakeholders
-
-- .NET Core Engineering
-- .NET Acquisition and Deployment
-- .NET Product teams
-
-## Risks
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-
-The major risk in this portion of the epic is finding and updating all container usages by product teams, and making sure that moving them to the latest versions of the container images doesn't break their builds/tests because of missing artifacts. Our goal is to use docker tags to label the latest known good of each container image, and replace usages of specific docker image tags with a `---latest` tag. That way, much like with helix images, their builds and tests will be updated automatically when we deploy a new latest version. In the transition to latest images, we may find that older versions of a container may have different versions of artifacts installed on those containers, which could affect builds and tests. We will need to be prepared to help product teams identify these issues and work through them.
-
-- What are your assumptions?
- - The Matrix of Truth work will enable us to identify all pipelines and branches that are using docker containers and which images they are using
- - We will be able to extend the existing publishing infrastructure to also identify images that are due for removal
- - All of our existing base images can be replaced with MAR-approved images (we can already see where this is not true, OpenSuse and Raspian are not available as MAR-tagged images, and Alpine will be deprecated soon)
- - Most of the official build that is currently built in docker containers can be built on Mariner
- - MAR-approved images are updated with OS patches and CVE fixes
-
-- What are your unknowns?
- - What should we do about images that are not available as MAR-approved base images?
- - How will we identify the last known good (LKG) for each docker image?
- - What testing is currently in place for docker images, so that we can have confidence that updating the `latest` image will not break product teams?
- - What is the rollback story for the `latest` tagging scheme?
-
-- What dependencies will this epic/feature(s) have?
-
-This feature will depend heavily on MAR-approved images (and whatever updating scheme they have for updating base images), as well as the existing functionality for building and publishing our docker container images. We will want to expand the existing functionality to allow us to 1) identify the last known good of each docker image and 2) tag that LKG with a descriptive `latest` tag.
-
-## Serviceability of Feature
-
-### Rollout and Deployment
-
-As part of this work, we will need to implement a rollout story for the new tagging feature. We do not want every published image to immediately be identified as the production version. To add some testing time, we will create a production branch of `dotnet-buildtools-prereqs-docker`, and perform weekly rollouts alongside our helix-service, helix-machines, and arcade-services rollouts. When we roll all of the known good changes in the main branch to production, those new images will then be tagged as the production versions. Daily images built in main will be considered staging images and will be available to customers if they so choose. We will also need a rollback story so that if an image breaks a product team's build or test, we can revert to a previously working version of the image.
-
-### FR and Operations Handoff
-
-We will create documentation for managing the tags so that when a rollback needs to occur, FR will be able to make those changes. Additionally, we will create documentation and processes that can be used by Operations and/or the vendors to handle any manual OS/base image updating or removing of old and out-of-date images from MCR, as necessary. We will also create documentation for responding to customer requests for new docker images, including where to get the base images, how to install required dependencies (though that is coming in a different one pager), and what the process is for adding new images when a major version update is requested for dependencies.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cimproved-container-image-lifecycle-arcade-10349.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cimproved-container-image-lifecycle-arcade-10349.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cimproved-container-image-lifecycle-arcade-10349.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/back-tested-accuracy-vs-time.svg dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/back-tested-accuracy-vs-time.svg
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/back-tested-accuracy-vs-time.svg 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/back-tested-accuracy-vs-time.svg 1970-01-01 00:00:00.000000000 +0000
@@ -1,1086 +0,0 @@
-
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup-justin-impl.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup-justin-impl.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup-justin-impl.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup-justin-impl.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,76 +0,0 @@
-# Pipeline Status
-
-*The list of queues is cached weekly. If your PR changes what queues your pipelines use, this information will not show the updated queues.*
-
-Here's a list of the top 5 highest work item wait times:
-
-| Queue | Work Item Wait Time | Difference in Moving Avg |
-| ---------------------------------- | ------------------- | ------------------------ |
-| [`Windows.11.Amd64.Client.Open`]() | **43min 2s** | *+12%* 📈 |
-| [`Ubuntu.1804.Amd64.Open`]() | **43min 2s** | *-3.0%* 📉 |
-| [`Debian.11.Amd64.Open`]() | **43min 2s** | *+0.1%* 📈 |
-| [`Windows.11.Amd64.Client.Open`]() | **43min 2s** | *+1%* 📈 |
-| [`Windows.11.Amd64.Client.Open`]() | **43min 2s** | *-7%* 📉 |
-
-
-## Grafana Dashboard
-
-For more in-depth information on the status of Helix, visit our [Grafana Dashboard]().
-
-## Your Queues
-
-☁️ **dotnet/runtime** is currently configured to submit to the following Helix queues:
-
-* `Alpine.313.Amd64.Open`
-* `Alpine.313.Arm64.Open`
-* `Alpine.314.Amd64.Open`
-* `Alpine.314.Arm64.Open`
-* `Centos.7.Amd64.Open`
-* `Centos.8.Amd64.Open`
-* `Debian.10.Amd64.Open`
-* `Debian.10.Arm32.Open`
-* `Debian.11.Amd64.Open`
-* `Debian.11.Arm32.Open`
-* `Fedora.34.Amd64.Open`
-* `Mariner.1.0.Amd64.Open`
-* `OSX.1015.Amd64.AppleTV.Open`
-* `OSX.1015.Amd64.Iphone.Open`
-* `OSX.1015.Amd64.Open`
-* `OSX.1100.Arm64.Open`
-* `OSX.1200.ARM64.Open`
-* `OSX.1200.Amd64.Open`
-* `Raspbian.10.Armv6.Open`
-* `RedHat.7.Amd64.Open`
-* `SLES.15.Amd64.Open`
-* `Ubuntu.1804.Amd64`
-* `Ubuntu.1804.Amd64.Android.29.Open`
-* `Ubuntu.1804.Amd64.Open`
-* `Ubuntu.1804.ArmArch.Open`
-* `Ubuntu.2004.S390X.Experimental.Open`
-* `Ubuntu.2110.Amd64.Open`
-* `Ubuntu.2110.Arm64.Open`
-* `Windows.10.Amd64.Android.Open`
-* `Windows.10.Amd64.Client21H1.Open`
-* `Windows.10.Amd64.Server2022.ES.Open`
-* `Windows.10.Amd64.ServerRS5.Open`
-* `Windows.10.Arm64.Open`
-* `Windows.10.Arm64v8.Open`
-* `Windows.11.Amd64.Client.Open`
-* `Windows.7.Amd64.Open`
-* `Windows.81.Amd64.Open`
-* `Windows.Amd64.Server2022.Open`
-* `Windows.Nano.1809.Amd64.Open`
-* `openSUSE.15.2.Amd64.Open`
-
-🏢 **dotnet/runtime** uses the following on-prem queues:
-
-* `Some.OnPrem.Queue`
-* `Some.OnPrem.Queue2`
-* `Some.OnPrem.Queue3`
-
-*Was this helpful?* 👍👎
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5CIncreaseVisibilityHelixQueues%5Cdesign-mockup-justin-impl.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5CIncreaseVisibilityHelixQueues%5Cdesign-mockup-justin-impl.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5CIncreaseVisibilityHelixQueues%5Cdesign-mockup-justin-impl.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/design-mockup.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,113 +0,0 @@
-# Pipeline Status
-
-*The list of queues is cached weekly. If your PR changes what queues your pipelines use, this information will not show the updated queues.*
-
-## Average Times
-
-| Item | Average Time |
-| ----------------------- | ------------ |
-| Obtain build machine | **13m 4s** |
-| Helix tests to complete | **30m 2s** |
-| Pipeline to complete | **1hr 18m** |
-
-*These estimated times are computed from historical data and may not accurately reflect the current status of AzDo and Helix*
-
------
-
-Here's a list of the top 5 highest work item wait times:
-
-| Queue | Work Item Wait Time | Difference in Moving Avg |
-| ---------------------------------- | ------------------- | ------------------------ |
-| [`Windows.11.Amd64.Client.Open`]() | **43min 2s** | *+12%* 📈 |
-| [`Ubuntu.1804.Amd64.Open`]() | **43min 2s** | *-3.0%* 📉 |
-| [`Debian.11.Amd64.Open`]() | **43min 2s** | *+0.1%* 📈 |
-| [`Windows.11.Amd64.Client.Open`]() | **43min 2s** | *+1%* 📈 |
-| [`Windows.11.Amd64.Client.Open`]() | **43min 2s** | *-7%* 📉 |
-
-
-## Queue Insights
-
-❌ The queue [`OSX.1015.Amd64.Open`]() has a high wait time of Xhrs and Ymin.
-* **Your tests will likely timeout**.
-* Current queue count: **560** (*+57%* over moving average)
-* Current work item wait time: **5hr 28min**
-
-⚠️ Currently, [`Windows.10.Amd64.Client21H1.Open`]() is experiencing a higher than normal work item wait time.
-* Current queue count: **560** (*+57%* over moving average)
-* Current work item wait time: **2hr 5min**. (*+22%*)
-* There are no known issues with our infrastructure.
-* ❗**There is currently a known issue with our infrastructure.** [Details.]()
-
-✅ [`OSX.1200.ARM64.Open`]() has unusually low traffic.
-* Estimated time in queue: **3m 4s**. (*-34%*)
-
-## .NET Engineering Services Infrastructure Status
-
-| Product | Status |
-| -------------- | :----: |
-| Helix | ✅ |
-| Queues | ⚠️ |
-| On-Prem Queues | ❌ |
-
-See our [Helix status overview dashboard]().
-
-## Grafana Dashboard
-
-For more in-depth information on the status of Helix, visit our [Grafana Dashboard]().
-
-## Your Queues
-
-☁️ **dotnet/runtime** is currently configured to submit to the following Helix queues:
-
-* `Alpine.313.Amd64.Open`
-* `Alpine.313.Arm64.Open`
-* `Alpine.314.Amd64.Open`
-* `Alpine.314.Arm64.Open`
-* `Centos.7.Amd64.Open`
-* `Centos.8.Amd64.Open`
-* `Debian.10.Amd64.Open`
-* `Debian.10.Arm32.Open`
-* `Debian.11.Amd64.Open`
-* `Debian.11.Arm32.Open`
-* `Fedora.34.Amd64.Open`
-* `Mariner.1.0.Amd64.Open`
-* `OSX.1015.Amd64.AppleTV.Open`
-* `OSX.1015.Amd64.Iphone.Open`
-* `OSX.1015.Amd64.Open`
-* `OSX.1100.Arm64.Open`
-* `OSX.1200.ARM64.Open`
-* `OSX.1200.Amd64.Open`
-* `Raspbian.10.Armv6.Open`
-* `RedHat.7.Amd64.Open`
-* `SLES.15.Amd64.Open`
-* `Ubuntu.1804.Amd64`
-* `Ubuntu.1804.Amd64.Android.29.Open`
-* `Ubuntu.1804.Amd64.Open`
-* `Ubuntu.1804.ArmArch.Open`
-* `Ubuntu.2004.S390X.Experimental.Open`
-* `Ubuntu.2110.Amd64.Open`
-* `Ubuntu.2110.Arm64.Open`
-* `Windows.10.Amd64.Android.Open`
-* `Windows.10.Amd64.Client21H1.Open`
-* `Windows.10.Amd64.Server2022.ES.Open`
-* `Windows.10.Amd64.ServerRS5.Open`
-* `Windows.10.Arm64.Open`
-* `Windows.10.Arm64v8.Open`
-* `Windows.11.Amd64.Client.Open`
-* `Windows.7.Amd64.Open`
-* `Windows.81.Amd64.Open`
-* `Windows.Amd64.Server2022.Open`
-* `Windows.Nano.1809.Amd64.Open`
-* `openSUSE.15.2.Amd64.Open`
-
-🏢 **dotnet/runtime** uses the following on-prem queues:
-
-* `Some.OnPrem.Queue`
-* `Some.OnPrem.Queue2`
-* `Some.OnPrem.Queue3`
-
-*Was this helpful?* 👍👎
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5CIncreaseVisibilityHelixQueues%5Cdesign-mockup.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5CIncreaseVisibilityHelixQueues%5Cdesign-mockup.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5CIncreaseVisibilityHelixQueues%5Cdesign-mockup.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/roslyn-CI.svg dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/roslyn-CI.svg
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/roslyn-CI.svg 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/IncreaseVisibilityHelixQueues/roslyn-CI.svg 1970-01-01 00:00:00.000000000 +0000
@@ -1,1603 +0,0 @@
-
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/increase-visibility-helix-queues-arcade8824.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/increase-visibility-helix-queues-arcade8824.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/increase-visibility-helix-queues-arcade8824.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/increase-visibility-helix-queues-arcade8824.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,153 +0,0 @@
-# Increasing visibility into the time builds & Helix tests take, and Helix errors
-
-## Goal and Motivation
-
-Our customers are dissatisfied with information about the status of the Helix queues in their given pipeline and the amount of time it takes waiting in queue to obtain a build machine.
-
-We currently have an awesome Grafana dashboard, but the discoverability is close to zero, and it doesn't clearly identify what is going on with the *overall* status of Helix. It does a great job of putting data into context (is this queue depth normal for this queue).
-
-The data it provides is too much for a typical dev. A dev's main concerns are:
-
-* How long will my build and tests take?
-* This queue is taking really long, is everything okay?
-* What's the overall status of Helix (outage, infrastructure issues, etc.)
-
-The dashboard doesn't answer these questions directly, but they can be *inferred* from the dashboard.
-
-We want a solution that puts this information right in front of devs.
-
-We can leverage our existing `BuildFailureAnalysis` projects to add another GitHub check that can present clear insights into the status of Helix, directly into the GitHub PR they are working on.
-
-
-### Stakeholders
-
-* .NET Core Engineering Services (contact: @dnceng)
-* Helix Customers (contact: @dncpartners)
-
-### Implementation
-
-**See [Design Mockup](IncreaseVisibilityHelixQueues/design-mockup.md) for a mockup of the overall, final design.**
-
-Our main goal is to add a new check, titled `Helix Queue Insights` that will be included directly in the PR a dev is working on.
-
-#### Subprojects
-
-This a list of all the features this one-pager is for.
-
-* Create a new check in the Github PR
-* Show the dev a list of queues (separated by on and off prem)
-* Show the top 5 queues with the highest work item wait time.
-* Create "insights" into the queues of their pipelines. Tell the dev that queues are:
- 1. **Extremely** high work item wait time. Is it likely this queue will timeout?
- 2. **Heavier** than usual wait time. This queue wait isn't normal and it will take more time for this queue.
- 3. **Lighter** than usual wait time. This test will run faster than normal!
-* Estimated times for:
- * Helix tests to complete
- * How long it will take for a build machine to be obtained
- * How long it will take for the entire pipeline to complete
-* Tracking how accurate our estimates are (from the Estimated Times feature)
-* A high level overview status of Helix
- * Are Helix, our on-prem and off-prem queues, etc. operating normally?
- * Red/Yellow/Green, similar to the [Azure DevOps Status](https://status.dev.azure.com/)
-* Draw a detailed graph of how builds and tests flow to show the user.
- * For example, the roots are the different builds and the leafs are different Helix queues
- * This will show the dev the overall hierarchy of their pipeline and allow them to optimize their pipeline by submitting jobs early to busy pools or reducing queues they use (thanks Stu)
-
-#### Projects in Scope
-
-In the coming 10 weeks of the internship, I will limit my scope to the following projects:
-
-* Create a new check in the Github PR
-* Show the dev a list of queues (separated by on and off prem)
-* Show the top 5 queues with the highest work item wait time.
-
-The result I'm looking to achieve is [this mockup](IncreaseVisibilityHelixQueues/design-mockup-justin-impl.md).
-
-**Stretch goals:**
-* Estimated times for helix tests and getting an AzDO build machine
- * Machine Learning
-
-
-### Technical Implementation Details
-
-1. Be notified of when a new PR is created.
- 1. Our existing code can already do this. Specifically, our `AnalysisProcessor` in our `BuildResultAnalysisProcess` microservice.
- 2. This needs to be changed to add a new Checkrun, as the Helix Queue Insights will be its own checkrun to avoid running into the 65k character limit. In addition, the Build Analysis page gets overwritten when any of the pipelines in the repo completes. We also don't want to mix build results with the status of Helix queues.
-
-2. Determine which queues the repo uses.
- 1. We will use the Matrix of Truth for this data.
- 2. Their data has is built from a job that Ilya mentioned at least updates once a week, and we can pull this information programmatically.
-
-3. Query the work item wait time and queue size for that pipeline's list of queues.
- 1. Currently Grafana has this data, with Kusto queries that we can pull and use.
- 2. We will simply pull the queries that Grafana uses them to present the data.
- 1. This creates a small issue of having the same query in two locations, the Grafana and this new feature. Stu and I discussed, and he mentioned that the queries haven't changed much (if at all) since the dashboard was created, so it should be okay.
- 2. The other option is to query Grafana for the data (which we can do), but then this creates an unnecessary tight dependency on Grafana,
-
-4. Process the data to compute moving average, and detect abnormalities.
- 1. This is interleaved with the previous step, but we'll need to compute the moving average using a Kusto query.
- 2. Compute the percent differences between the current work item wait time and the moving average.
-
-5. Calculate the time for the entire pipeline to complete
- 1. Build Analysis currently can determine what pipelines a PR will trigger
- 2. We can compute the max time for the pipelines (which is how long the CI will take), and compute the 95th percentile over a certain time period.
- 3. This will yield the time that the CI pipelines *usually* take.
-
-6. Build a model for the Markdown template & create the markdown.
- 1. We'll need to process the data from the Kusto queries into models that we can format the markdown handlebars template.
- 2. We'll also need to generate links to the Grafana dashboard for each queue.
- 3. Use the model to turn the handlebars template into markdown.
- 4. *It's also possible to include screenshots of the Grafana graphs to embed in the markdown. This should be noted and can be explored later.*
-
-7. Send the checkrun to GitHub.
- 1. The markdown will not be refreshed whenever the user opens the page, instead it'll be a "one-shot" when the PR is submitted.
-
-### Proof of Concept (POC)
-
-See https://github.com/maestro-auth-test/helix-queue-insights-test/pull/11/checks?check_run_id=6801735696
-
-### Risk
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
- - No. This is a new feature that will not cause any breaking changes for our customers.
-- What are your assumptions?
- - The design mockup and the information it provides are beneficial to our customers and will allow them to get a high level overview of relevant information about the status of Helix.
-- What are your unknowns?
- - The level of satisfaction this will bring to our customers.
- - The accuracy of our data. Queue behavior can wildly fluctuate between outages and large test runs. Ideally we should have a system to track the accuracy of our predictions to have the data for our customers and improve our estimates.
-- What dependencies will this epic/feature(s) have?
- - Kusto
- - AzDo
- - Grafana
- - GitHub
- - Matrix of truth
-- Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
- - August 2022. I've limited my scope of features I will work on to be able to deliver complete features.
-- Does anything the new feature depend on consume a limited/throttled API resource?
- - While not throttled, the preview pipeline API can take ~10 seconds to return.
- - While we are only planning this check run to be a snapshot when the PR is created, GitHub gets grumpy when updating a checkrun numerous times.
-- Have you estimated what maximum usage is?
- - No, but this type of feature is already implemented, and this project will extend off that.
-- Are you utilizing any response data that allows intelligent back-off from the service?
- - All API calls in this project are returned synchronously.
-- What is the plan for getting more capacity if the feature both must exist and needs more capacity than available?
- - This feature shouldn't require more capacity than I can handle.
- - If so, we can reduce the scope of the project and prioritize key items we think would be beneficial to the customer.
-
-### Usage Telemetry
-
-- How are we measuring the “usefulness” to the stakeholders of the business objectives?
- - After the feature is implemented and initial rolled out, we can ask members of the CI Counsel if the feature is providing usefulness, and things we can change to make it more useful.
- - We will also use the sentiment tracker from [`Helix.Utility.UserSentiment`](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-service?path=/src/Utility/Helix.Utility.UserSentiment) to gather information on whether the new information is helpful.
-- How are we tracking the usage of this new feature?
- - The same way we track who has enabled .NET Build Analysis
- - There is currently no plan on tracking how many people have actually viewed this new checkrun.
-
-## Service-ability of Feature
-
-If the format of our database changes, we'll have to change the Kusto queries that this feature uses. In that event, our Grafana dashboard will also be broken.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cincrease-visibility-helix-queues-arcade8824.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cincrease-visibility-helix-queues-arcade8824.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cincrease-visibility-helix-queues-arcade8824.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/moving-daily-builds-off-dotnetcli-arcade5757.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/moving-daily-builds-off-dotnetcli-arcade5757.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/moving-daily-builds-off-dotnetcli-arcade5757.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/moving-daily-builds-off-dotnetcli-arcade5757.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,170 +0,0 @@
-# Moving daily builds off the dotnetcli storage account (Related to [Arcade epic #5757](https://github.com/dotnet/arcade/issues/5757))
-
-## Summary
-
-Prior to .NET Core 6.0, all internally-produced daily builds of .NET Core were Authenticode "real" signed and published to the same storage locations used for official releases of .NET Core. In latest builds, the same publishing locations continue to be used but non-final builds are not real-signed.
-
-While publishing to a single location is convenient for the infrastructure, it poses multiple risks. Every build that publishes ends up handling secrets that allow writing to the official storage account, making these builds sensitive security-wise. These builds' publishing infrastructure could also simply contain bugs and accidentally overwrite blobs they should not, or delete them simply by accident. To prevent this we should move daily builds onto a storage account (possibly in a different subscription entirely, but this is not necessary), and secrets that relate to publishing to the production account can be removed from almost all locations and be made available to a select few Azure DevOps build pipelines, and only when publishing real assets.
-
-As this will be a disruptive change, something to do at the same time that can improve safety would be to start using [the Azure Storage "Legal Hold" flag](https://docs.microsoft.com/en-us/rest/api/storagerp/blobcontainers/setlegalhold) for all containers with .NET Core builds in them. In experiments, this easily-reversed container-level change allows adding new blobs but blocks deleting or modifying existing ones. This means an extra layer of protection both from engineering errors (e.g. accidental overwriting, accidentally duplicating content) as well as meaning malicious users obtaining storage account credentials could not modify existing assets to contain their payloads.
-
-## Stakeholders
-
-- .NET Core Engineering Services team (contact: @dnceng)
-- .NET Core SDK team (contact: @marcpopMSFT)
-- .NET Core Docker team (contact: @MichaelSimons / @mthalman): The .NET Docker team produces Docker images on mcr.microsoft.com that preinstall .NET Core SDKs and may need to adjust this.
-- .NET Install Scripts team (contact: @vlada-shubina / @BOzturkMSFT): If a new location for .NET Core SDKs/ Runtimes is introduced, the install scripts should support this directly for non-official build
-- .NET Core PM Team (contact: @richlander) : Any change in the source of our runtimes / SDKs will need to have a public annoucement letting users know this is coming, as there are some number of external customers using daily builds in their existing development / CI process.
-- .NET Core Runtime teams (various; most other than SDK will uptake any change in behavior from Arcade.)
-- .NET Core Release team (contact: @leecow https://github.com/dotnet/release)
-- Anyone using dotnet-install to download daily bits
-
-## Risk
-
-- What are the unknowns?
- - We don't know outside common Arcade functionality where all dependencies on these storage account URIs are located. There will almost certainly be a multi-week tail of folks finding their build or dev workflow broken and pinging us from inside and outside the .NET org.
- - We actually don't know how many users actually use dailies (following up with @RichLander)
- - We don't know all the places where usage of dotnetcli URLs is hard-coded in calculating download urls, but there are definitely plenty.
-
-- Are there any POCs (proof of concepts) required to be built for this epic? _(note I used the "epic" one-pager; this might not be an epic)_
-
- There is nothing super groundbreaking about having multiple locations that one might find a build in, with a preference hierarchy for when the same content is found in more than one location. That said, here are some things I imagine we might want to try rigging up versions of the dotnet-install.sh / ps1 scripts that start looking in dotnetcli, then fall back automatically to the new build storage account (named something like "dotnetbuilds") to get feedback on whether this is satisfactory.
-
-- What dependencies will this epic have? Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
-
- None - it's new work to improve how and when we access secrets that expose our sensitive assets are, along with making it clear via simple base URI that a build is not official.
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-
- This change guarantees there will be some breaking changes for users who miss public communication about the changes to backing storage for .NET Core SDKs/Runtimes. I would alse expect to see some problems just arising from bugs and needing to run the "machinery" of our build/release project to catch all the places that this occurs. This can be mitigated in the short term by continuing to do the old behaviors (e.g. sign all binaries and publish to both locations) until we feel confident in the content being uploaded to the new storage account.
-
-- Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
-
- Aside from the challenges listed, there is no specific date or milestone this work must be completed within. It should be done as quickly as safely possible for its benefits but is not tied to any particular milestone or product release date.
-
-## Open questions
-
-- Is this actually in the Unified build access epic? (dotnet/arcade#5757)
-
- I don't know, but I'm at least writing this one-pager as part of that epic.
-
-- Do we actually want external users to have access to unsigned .NET Core SDKs?
-
- Per discussion with @mmitche: "Yes; we're (implicitly) saying that MS has not made a statement about the quality or validity of it, and if the user (say, Bing.com) wants to ship it they have to sign it"
-
-- Same question, but do we want unsigned SDKs building official builds?
-
- Per discussion with @mmitche, it's unavoidable in certain cases, and while ideally we'd only use offically released previews, Barry says this is not a problem"
-
-- How can we actually determine how many external / internal users are using dailies?
-
-- Who owns the SDL validation and the new CDN that would likely be needed here (note: a sufficiently geo-redundant storage account can likely handle this performance-wise)
-
-- Is there an SDL Threat Model for dotnetcli?
-
- Nothing since 2016. It is likely something we must address.
-
-- Where should these resources live, and who should own them?
-
-## Components to change, with order/estimates of work to do
-
-### Component: dotnet-install scripts
-
- As these scripts are the most common entry point for installing .NET Core SDKs and runtimes, including for most DncEng infrastructure, updating these scripts would be the first step such that as soon as builds start publishing bits there, these scripts would continue to work as expected. Estimate: ~1 week.
-
- #### High-level activities:
- - Secure and create the storage account to use (note "dotnetbuild" is taken, perhaps "dotnetbuilds" or one of our pre-existing accounts like "dotnetbuildoutput")
- - Insert secrets for the new storage account into EngKeyVault for usage by builds.
- - Copy over (and munge versions to prove things worked) some builds from the DotNetCLI storage account into this account
- - Modify scripts in https://github.com/dotnet/install-scripts until they can correctly install from the new location (with preference "DotNetCLI -> New Storage account --> Version specified on command line).
-
-### Component: dotnet/arcade repo
-
- Shared functionality for fetching the dotnet-install scripts, as well as the "where to publish" logic lives in https://github.com/dotnet/arcade. This makes an obvious next place to address. Additionally, we could bring back logic created earlier to have dotnet-install.sh/ps1 in the eng/common folder of Arcade, allowing Arcade-ified repositories to use the latest script changes before any chance of impacting all other users publicly, or have a secondary location supported for the dotnet-install scripts.
-
- #### High-level activities:
- - Add variable group(s) containing secrets for publishing to the new storage accounts
- - Introduce the ability to publish daily builds to the new storage account consuming values from these variable groups.
- - Update mentions of storage accounts in documentation to reflect changes.
- - Remove existing referneces to dotnetcli-storage-key variables
-
-### Component: Partner teams (dotnet/sdk, docker, and other repos
-
- The .NET Core SDK team (and possibly others; we would search through all existing variable usage in their main builds and get a list) would need to make any related changes related to acquiring bits.
-
- #### High-level activities:
- - Work with SDK team to ensure relevant components know about possible new locations for bits.
- - Scan for all uses of dotnetcli in the main dev branches and start triaging them (understand and convert or update usage)
- - Work with Docker team to understand how they acquire .NET core installations for Docker image creation; this would likely just be a change to the infrastructure that finds URLs and calculates hashes of the bits to be installed in docker images, and ensuring it knows about the new account.
-
-### .NET Engineering Services tasks
-
- Aside from being responsible for driving this overall effort and all dotnet/arcade changes, the .NET Core Engineering Services team would be responsible for cleaning up after daily builds were migrated:
-
- #### High-level activities:
- - Remove entirely (or limit the pipelines allowed to use it) the DotNet-DotNetCli-Storage and DotNet-MSRC-Storage variable groups from dnceng.vs and devdiv.vs.
- - Remove all dotnetcli/dotnetclimsrc secrets from EngKeyVault and cycle storage keys (after dailies are working for some time)
- - Rig up the ability for official release pipelines to continue to publish to the "official" storage account.
- - Set Legal Hold flag on all real containers used for builds and make sure documentation reflects this (along with "how to break out of this temporarily if needed" docs)
- - Updates for publish destinations (i.e.) (PublishingConstants.cs). Change the target storage accounts to the dotnetbuildoutput/dotnetbuilds account.
-
-## Serviceability
-
-- How will the components that make up this epic be tested?
-
- Where functionality is changed in a repository, unit / scenario testing will be added. Most of the work is high-level configuration of Azure DevOps and builds so the testing will running the machinery in question. For the end-to-end "full .NET Core SDK is produced and uploaded to the new account" scenario, this will require a manual test plan to ensure that at least the happy-path works before turning functionality on.
-
-- How will we have confidence in the deployments/shipping of the components of this epic?
-
- We'll know things have successfully moved over when we disable / delete the secrets related to the dotnetcli storage accounts from shared key vaults, cycle these values, resolve outstanding issues, and continue to function.
-
-- Identifying secrets (e.g. PATs, certificates, et cetera) that will be used (new ones to be created; existing ones to be used).
- - Storage Account Keys (used for publishing), rotated via Azure Portal
- - Storage account container-specific SAS tokens, generated programmatically or via Azure Portal
-
-- Does this change any existing SDL threat model?
-- Does this require a new SDL threat model?
-
- The most recent threat model I am aware of dates back to 2016 and was written before Azure DevOps public support existed. While this change is largely meant to improve security, it's clear this area is due for some SDL review regardless of this change.
-
-
-### Rollout and Deployment
-- How will we roll this out safely into production?
-
- This may be difficult; it's impossible to know everything you don't know. However, making the storage-account choice conditional on build type and enabling it a little bit at a time make it relatively simple to undo if major problems are hit. Additionally, publishing to both locations in the beginning may represent the simplest way to proceed. This would be temporary, to allow us to rapidly switch back in case of problems.
-
- - Are we deprecating something else?
-
- No, all previously defined storage accounts for SDKs/Runtimes continue to exist, they only get fewer and more meaningful insertions, along with making blobs inherently immutable via the "Legal Hold" feature.
-
-- How often and with what means we will deploy this?
-
-Once adopted these changes are enshrined in the DotNet Arcade publishing workflow, so changes are deployed to where they're used via regular Arcade dependency flow pull requests.
-
-- What needs to be deployed and where?
-
- New storage accounts will be added to a subscription (TBD; this subscription will likely need to be treated "special"). Everything else comes from changes to Azure DevOps pipelines and the dotnet/arcade repo.
-
-- What are the risks when doing it?
-
- While change is ongoing, access to daily builds, the ability to produce new release builds, and .NET Core repositories' official pipelines may be broken for some time.
-
-
-## Usage Telemetry
-- How are we tracking the “usefulness” to our customers of the goals?
-
- As this is an improvement for both security and reliability, we don't care how useful these changes are for users, just that they can still perform their builds through some means.
-
-- How are we tracking the usage of the changes of the goals?
-
- Usage metrics would be based off the existing dotnet install telemetry (and likely this is where we'd have to go to know who is using non-release builds).
-
-
-## FR Hand off
-- What documentation/information needs to be provided to FR so the team as a whole is successful in maintaining these changes?
-
-Changes to the publishing workflow, specifically the components inside dotnet/arcade, will need to be detailed and stored in the wiki or documentation folders of dotnet/arcade.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cmoving-daily-builds-off-dotnetcli-arcade5757.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cmoving-daily-builds-off-dotnetcli-arcade5757.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cmoving-daily-builds-off-dotnetcli-arcade5757.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/native-tools-bootstrapping-security15522.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/native-tools-bootstrapping-security15522.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/native-tools-bootstrapping-security15522.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/native-tools-bootstrapping-security15522.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,101 +0,0 @@
-# Deprecating Native Tools Bootstrapping
-
-## Purpose
-The whole thrust behind native tools bootstrapping was that build machines should be
-clean and repos should bring their dependencies to the machines at build times. However,
-this presents a major security problem: how do we ensure that all these repos are keeping
-the executables they're installing on the build machines up to date? The current answer
-is that we simply aren't, but that needs to change.
-
-Going forward, **we will be installing all artifacts on the build machines directly via
-dotnet-helix-machines** (the machine prep process), except for the ones installed via package
-feeds. This is a departure from the "keep build machines clean" philosophy and from our work
-with folks on bootstrapping native dependencies, but it makes sense from a security and
-maintainability perspective.
-
-## The Current State of the World
-Based on [a search of the dotnet org](https://github.com/search?l=JSON&q=org%3Adotnet+native-tools&type=Code),
-there are currently 10 non-archived repos using native tools bootstrapping. The artifacts
-they're bootstrapping are as follows:
-
-| Artifact | Version(s) | Latest Patch Version(s) | Repos Using |
-|----------|------------|-------------------------|-------------|
-| cmake | 3.11.1, 3.14.5, 3.16.4, 3.21.0 | 3.11.4, 3.14.7, 3.16.9, 3.21.5 | arcade-validation, deployment-tools, installer, msquic, runtime, winforms |
-| cmake-test (fake artifact for testing) | 3.11.1 | N/A | arcade-validation |
-| dotnet-api-docs_netcoreapp3.0 | 0.0.0.2 | N/A | wpf-test |
-| dotnet-api-docs_net5.0 | 0.0.0.3 | N/A | winforms, wpf |
-| msvcurt-c1xx | 0.0.0.8 | N/A | wpf-test |
-| net-framework-48-ref-assemblies | 0.0.0.1 | N/A | wpf, wpf-test |
-| perl | 5.32.1.1 | 5.32.1.1 | fsharp |
-| python3 | 3.7.1 | 3.7.12 | deployment-tools, performance, runtime |
-| strawberry-perl | 5.28.1.1-1 | 5.28.2.1 | wpf, wpf-test |
-
-This indicates that four main executables are being bootstrapped:
-* CMake (multiple different minor versions)
-* Perl
-* Python 3
-* Strawberry Perl
-
-Additionally, Winforms and WPF rely on some zip files that probably should come directly from
-framework targeting packs, etc.
-
-Evidence of the necessity of this work can be found in the fact that Python 3.7.1 has
-[multiple CVEs](https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=python+3.7) against it.
-
-## Removing Native Tools Bootstrapping Direct Downloading
-The most straightforward way to deal with this problem is to simply stop natively bootstrapping artifacts
-onto the machines. This approach has two main advantages: first, it brings the security of the machines fully
-under the purview of the .NET Engineering Services team (which makes sense as we manage the machines
-in every other respect) and allows us to make sure these artifacts are kept up-to-date, and second,
-it simplifies the task of maintaining these artifacts for vendors (as there will be one central repository
-for the artifacts and one place to check for component governance alerts).
-
-It also has its drawbacks, however, which will be mentioned later in the document. However, it is the
-belief of the .NET Engineering Services team that these drawbacks do not outweigh the benefits of
-consolidating artifacts in dotnet-helix-machines.
-
-The basic outline of this process would be:
-
-1. Take stock of what native tools are currently being used, create artifacts for them, and install
- and add those artifacts to the build queues.
-2. Alert teams that we will be decommissioning native tools bootstrapping and installing tools like
- CMake and Perl on the build machines and give them time to migrate their builds to not use
- native tools bootstrapping anymore.
-3. Rework native tools scripts in Arcade to put the appropriate version of an artifact on the machine
- (e.g., a specific minor version of CMake) on the path.
-
-In the future, when a team needs a specific artifact, they can request it be added to the machines. We will
-vet the artifact through component governance before adding it to machines in production. If the artifact
-is going to be installed side-by-side along with other versions of the artifact (e.g., if they want to install
-CMake 3.21 and 3.11 is already on the machines), the requester will specify the version they want to use
-in the global.json under "native-tools" just like they do today. The bootstrapping scripts will then elevate
-the specific version of a tool to the path. However, when possible, we should attempt to consolidate on a
-single version to obviate the need for the use of these scripts.
-
-### Takebacks/Downsides
-This solution is, unfortunately, a takeback from previous solutions that involved investment in native
-tools bootstrapping (see our epic for [Harmonizing Arcade and Runtime Repos](https://github.com/dotnet/arcade/issues/6560)).
-Most obviously, this worsens the local dev experience which previously relied on native tools bootstrapping
-to make specific dependencies less necessary. Going forward, repos will instead have to specifically note
-which dependencies are required to build the product in their readme or COMPILING.md. The .NET Engineering
-Services team will send out reminders to update the versioning of these dependencies every time there is a
-version bump of an artifact. Eventually the plan is to use the matrix of truth (which knows which versions
-of artifacts are installed on images) to flow the exact versions being used on build machines to READMEs automatedly.
-
-This also reduces the control that repos have over their dependencies. The hope is that by allowing for side-by-side
-installation and minor version selection, repos will still be able to have control over minor versions of
-the tools they use while staying up to date with the latest patches.
-
-In order to avoid build breaks while still maintaining security, once we have settled on a minor version for an artifact,
-we will only be bumping its patch version as that is updated. This will restrict artifacts to security updates and bugfixes,
-minimizing build breakage. The only time when a minor or major version bump will be required is when a particular version
-reaches end-of-life and thus will no longer be maintained.
-
-## Conclusion
-While this is a significant departure from our previous thinking in the space, deprecating direct downloads via
-native tools bootstrapping and moving to a machine-only artifacts world will significantly improve the
-security and maintainability of machine artifacts going forward.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cnative-tools-bootstrapping-security15522.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cnative-tools-bootstrapping-security15522.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cnative-tools-bootstrapping-security15522.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/non-sdk-partial-releases-core-eng-14577.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/non-sdk-partial-releases-core-eng-14577.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/non-sdk-partial-releases-core-eng-14577.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/non-sdk-partial-releases-core-eng-14577.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,131 +0,0 @@
-# Non-SDK Partial Releases for 6.0
-
-## Summary
-
-In general, we only have two types of releases: full releases (which consist of sdk, runtime, aspnetcore, and windows desktop artifacts) and sdk-only releases (which only consist of the sdk, and rely on previously released versions of runtime, aspnetcore and windows desktop). However, in the 6.0 timeframe, we now find ourselves requiring a non-sdk partial release (i.e. a runtime-only or runtime and aspnetcore-only release), due to requirements for MAUI. Specifically, we will need to publish all of the workload packs and packages for this process.
-
-## Stakeholders
-
-* .NET Core Engineering Services team (contact: @dnceng)
-* .NET SDK team
-* .NET MAUI team
-* .NET Release team
-
-## Risks
-
-* What are the unknowns?
- * Today, it is unclear how to identify which files we will want to publish and how. Many are in the "workloads" directory for a given repo (runtime, emsdk), but the nupkgs are all in a flat directory structure. While we could publish all of the new nupkgs, it's unclear if we would actually want to. The safest option is to just publish all of the artifacts that are part of the release, rather than trying to pick and choose. This is what we should do, as it is simpler, safer, and gives us more flexibility for using this work in the future.
- * We do not know how much of the infrastructure around releases relies on there being an actual sdk version associated with a release. Depending on how much we can work around, we may be able to work within the current infrastructure, but it also might require a full rewrite of some of the initialize release work, including that around linux signing, building the release layout, generating the release metadata (if we want to do that, which is also an unknown), etc.
- * We do not know how much of the full release process we will require in a runtime-only release, and how that will interact with our scheduled releases. We want to make sure that we reuse and share as much of the current infrastructure and processes as we can; however, it may require some one off work that would only work for these non-sdk partial releases.
-
-* Are there any POCs (proofs of concept) that need to be built for this work?
-
- There is nothing particularly new that we should need to build for this work that doesn't already exist as part of the Stage-DotNet and Release-DotNet-5.x pipelines. While we will require some modifications to the current system that may necessitate a new pipeline on the release side, the staging pipeline should be able to be used mostly, if not completely, unchanged.
-
-* What dependencies will this work have? Are the dependencies currently in a state that the functionality in the work can consume them now, or will they need to be updated?
-
- This work depends on the work that the runtime, emsdk and other repositories have done to allow for releases of workloads (namely, the workloads directory that these repositories now produce). The work may also rely on a new naming schema for the workload nupkgs, if we decide we need to do that. Finally, the work relies on the Stage-DotNet and Release-DotNet-5.x pipelines that currently perform our releases, and the processes that those pipelines do today. These pipelines and/or processes will likely need to be updated in order to support this new type of release.
-
-* Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-
- It should not. Wherever possible, we should add existing functionality that is exclusive to non-sdk partial releases under option flags that do not affect existing infrastructure.
-
-* Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
-
- This work is required for MAUI to release pre-.NET 7. What will trigger the need for this work is a big runtime change that is too risky to immediately go into .NET 6 GA or servicing, but is required for MAUI. We will likely need this by early 2022. While this work is currently specifically for MAUI bring up today, it may likely be needed for shipping preview versions of servicing releases of .NET 6 with potentially risky changes.
-
-## Open Questions
-
-* What epic should this be part of?
-
- It is strongly recommended that this work go into its own epic
-
-* What needs to be updated?
-
- It's unclear, as the staging pipeline fails early (in linux signing), and everything later in the pipeline is gated on the signing stage. Once we have a clear picture of the staging pipeline, we can get a handle on what changes need to be done in the release pipeline.
-
-* Will this be treated differently than a full release or an sdk-only release?
-
- Probably, as we aren't going to be publishing everything from the drop, unlike what we do for full/sdk-only releases.
-
-## Components to change, with order/estimates of work to do
-
-### Component: Staging pipeline
-
-The Staging pipeline does the work of preparing the release, including gathering the drop, signing the assets, validating the assets, and creating the final release layout. Most of the pipeline is pretty robust and seems to handle well builds that don't contain sdk assets, however some of the pipeline relies on a fully formed release config.json, which we will not have in these releases.
-
-#### High-level activities:
-
-* Update the config file generation to allow us to get runtime version info from the runtime assets (and aspnetcore version from the aspnetcore assets), not just the sdk dependencies. We would like to, as much as possible, automatically detect what kind of release the pipeline has been asked to perform.
-* Update initialize-release to allow for config files that are missing versions for sdk, runtime, aspnetcore, etc.
-* Determine if we actually need the release metadata, or if that will be unnecessary for this sort of release.
-* Modify the yaml to skip anything that is unnecessary for these releases
-* Find all the places where we use the sdk version in the yaml, and for these releases, use something else (runtime version, for example). Using the sdk version is currently breaking some publishing code, in addition to the release initialization infrastructure.
-* Confirm that VS insertion continues to work properly for this scenario
-
-### Component: Release pipeline
-
-The release pipeline does the actual work of publishing the release, including creating additional metadata files, publishing nuget packages, publishing signed files to dotnetcli, updating aka.ms links, publishing symbols, publishing transport files to public locations, etc.
-
-#### High-level activities:
-* Walk through the entire pipeline to see what uses the config file, and how.
-* Confirm that anything that uses the release layout (i.e., what we drop to NET_CORE on vsufile), can handle files being missing.
-* Update whatever metadata file creation there is to accept a config file that is missing data that is not part of the release.
-* Confirm that repo-propagation will handle missing versions gracefully.
-* Determine how we will release only the workloads directory, rather than the entire drop.
-* Determine how we will identify and publish only those nupkgs that we are interested in releasing for this process.
-
-### Component: Product repositories
-
-The product repositories need to make sure that any workloads that need to be released as part of this process are published as part of the workloads directory. Many already do this today.
-
-#### High-level activities:
-* Make sure any repositories we need to be part of this work publish required assets to their workloads directories.
-* Where possible, have assets put in a subdirectory that identifies the sdk release version the assets should be associated with
-* Potentially rename required nupkgs to identify them as Workload packages, like we do the VS.* packages for other releases.
-
-## Serviceability
-
-* How will the components that make up this epic be tested?
-
-Like the staging pipeline, we will have a test pipeline and BAR Build ID that we can run through the full pipeline for testing. This should be run prior to any main testing and will run on production rollouts. Additionally, all new functionality will have unit tests added to test that they're doing what they are supposed to and don't break prior behavior.
-
-* How will we have confidence in the deployments/shipping of the components of this work?
-
-By using the test pipelines prior to deployment.
-
-## Rollout and Deployment
-
-* How will we roll this out safely into production?
-
-By using the test pipelines with known good builds.
-
-* How often and with what means will we deploy this?
-
-Weekly, along with the rest of dotnet-release, or as needed, if weekly is too frequent.
-
-* What needs to be deployed and where?
-
-Any code related to the staging and release pipelines. We will deploy to the production branch (and potentially later, the release/6.0 branch) from main.
-
-* What are the risks when deploying?
-
-We may break the main pipeline for full releases if we do not do enough testing. With two/three different scenarios that require testing, we may need to run the full test pipeline for each of these types of releases. Instructions for validating any changes made to the pipeline can be found [here](https://github.com/dotnet/arcade/blob/main/Documentation/Staging-Pipeline/making-and-validating-changes.md).
-
-## FR Handoff
-
-* What documentation/information needs to be provided to FR so the team as a whole is successful in maintaining these changes?
-
-Changes to the staging pipeline are already documented in the Staging-Pipeline docs, however, we will likely want to add additional information about the new scenarios.
-
-## Useful Release-Related Documentation and Links
-
-* [Stage-DotNet](https://dev.azure.com/dnceng/internal/_build?definitionId=792)
-* [Release-DotNet-5.x](https://dev.azure.com/dnceng/internal/_build?definitionId=984)
-* [Running the Staging Pipeline](https://github.com/dotnet/arcade/blob/main/Documentation/Staging-Pipeline/running-the-pipeline.md)
-* [Original Release Rings Plan](https://github.com/dotnet/arcade/blob/main/Documentation/ReleaseRingsPlan.md)
-* [releases.json](https://github.com/dotnet/core/blob/main/release-notes/6.0/releases.json)
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cnon-sdk-partial-releases-core-eng-14577.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cnon-sdk-partial-releases-core-eng-14577.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cnon-sdk-partial-releases-core-eng-14577.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/pipeline-machine-learning-arcade8824.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/pipeline-machine-learning-arcade8824.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/pipeline-machine-learning-arcade8824.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/pipeline-machine-learning-arcade8824.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,158 +0,0 @@
-# Predicting Pipeline Durations
-
-Our customers would love to know how long their CI pipeline takes. Ideally, we would want to build a model that can account for the current state of Helix, the work items they send, the congestion of AzDo, etc. to get a decently accurate estimate. Unfortunately, this is a highly complex model that is quite difficult to build (but theoretically possible).
-
-Instead, we can build a model to give us a range of estimates, based on the past durations of their pipelines.
-
-We have tons of historical data that can enable us to make predictions and generate confidence intervals on the length of CI pipelines.
-
-## Stakeholders
-
-* Our Partners (@dncpartners)
-* .NET Engineering Services (@dnceng)
-
-
-## Proof of Concept
-
-*[Here is a jupyter notebook](https://ml.azure.com/fileexplorerAzNB?wsid=/subscriptions/a4fc5514-21a9-4296-bfaf-5c7ee7fa35d1/resourcegroups/t-jperez/workspaces/HelixMLTest&tid=72f988bf-86f1-41af-91ab-2d7cd011db47&activeFilePath=Users/t-jperez/pipeline-machine-learning-arcade8824.ipynb) with the detailed statistics and data science that supports the following claims.*
-
-By plotting a histogram of pipeline durations for specific pipelines, I've determined that they seem to follow distributions that we can model. For instance, here is the `roslyn-CI` pipeline, with a dweibull distribution fitted.
-
-
-
-With this distribution, we can compute the 95% confidence interval, which for this pipeline, is:
-
-`1:11:27 +- 0:27:57`
-
-We can vary the confidence interval, and thus the accuracy of our predictions, for a smaller range. From testing on all pipelines that have Build Analysis enabled, here are the detailed statistics for the *ranges* of predictions we give, in seconds. This data is back-tested, meaning that at the time the range was computed, the model only had the data available previously.
-
-```
-count 534.000000
-mean 69.490963
-std 415.535966
-min 3.197592
-25% 12.124280
-50% 19.041886
-75% 27.074249
-max 6289.076812
-dtype: object
-```
-
-### Model Accuracy
-
-With a target of 95% accurate, the back-testing concluded that we had a true accuracy of 93.3%.
-
-We backtested the model by training on all previous data before a point, and then testing on 1 week ahead, on data the model has not seen before. Here is a graph of the accuracy over time.
-
-
-
-The dashed red line shows the target, 95% accurate predictions. Our predictions hold accurate, at worst dipping to just below 89%, and hovering between 90% and 95%.
-
-This data is an aggregation of accuracy vs time for all repos with Build Analysis. When evaluating the accuracy, the data point in question was not used to fit the distribution, preventing a look-ahead bias. Accuracy is defined as:
-
-$\frac{\text{points in predicted range}}{\text{total points}}$
-
-### Implementation
-
-A unique distribution for each pipeline requires a training for each pipeline, and this functionality would more than likely require an Azure Function. The added burden of maintenance and engineering machinery makes this too cumbersome.
-
-Instead, we can use [Chebyshev's inequality](https://en.wikipedia.org/wiki/Chebyshev's_inequality), because we have the mean and variance defined for each pipeline. Using ${\displaystyle k={\sqrt {2}}}$ shows that the probability that values lie outside the interval ${\displaystyle (\mu -{\sqrt {2}}\sigma ,\mu +{\sqrt {2}}\sigma )}$ does not exceed ${\displaystyle {\frac {1}{2}}}$. At the time of writing, here is a list of the ranges we'd give customers who have Build Analysis enabled:
-
-
-| Definition | Mean | Confidence Interval |
-| -------------------------------------------------- | ------------ | ------------------- |
-| \dotnet\arcade\arcade-ci | 45m 14s | ± 10m 41s |
-| \dotnet\arcade-services\dotnet-arcade-services | 25m 8s | ± 5m 26s |
-| \dotnet\arcade-validation\arcade-validation-ci | 13m 24s | ± 6m 17s |
-| \dotnet\aspnetcore\aspnetcore-ci | 1hr 32m 8s | ± 27m 56s |
-| \dotnet\aspnetcore\aspnetcore-components-e2e | 33m 58s | ± 3m 11s |
-| \dotnet\aspnetcore\aspnetcore-quarantined-pr | 1hr 4m 4s | ± 17m 7s |
-| \dotnet\installer\installer | 57m 22s | ± 36m 3s |
-| \dotnet\performance\performance-ci | 47m 41s | ± 22m 7s |
-| \dotnet\roslyn\roslyn-CI | 1hr 10m 41s | ± 19m 22s |
-| \dotnet\roslyn\roslyn-integration-CI | 1hr 22m 24s | ± 12m 23s |
-| \dotnet\roslyn\roslyn-integration-corehost | 1hr 22m 56s | ± 11m 2s |
-| \dotnet\runtime\dotnet-linker-tests | 59m 55s | ± 13m 55s |
-| \dotnet\runtime\runtime | 2hrs 10m 52s | ± 1hr 21m 38s |
-| \dotnet\runtime\runtime-coreclr outerloop | 4hrs 20m 24s | ± 57m 37s |
-| \dotnet\runtime\runtime-coreclr superpmi-asmdiffs | 1hr 20m 26s | ± 14m 10s |
-| \dotnet\runtime\runtime-coreclr superpmi-diffs | 1hr 53m 35s | ± 19m 24s |
-| \dotnet\runtime\runtime-coreclr superpmi-replay | 1hr 38m 15s | ± 17m 26s |
-| \dotnet\runtime\runtime-dev-innerloop | 1hr 21m 38s | ± 8m 16s |
-| \dotnet\runtime\runtime-extra-platforms | 2hrs 23m | ± 52m 44s |
-| \dotnet\runtime\runtime-libraries enterprise-linux | 41m 48s | ± 7m 57s |
-| \dotnet\runtime-assets\runtime-assets | 2m 5s | ± 35s |
-| \dotnet\sdk\dotnet-sdk-public-ci | 59m 1s | ± 16m 15s |
-
-We can use a Kusto Query like so and show this information in Queue Insights. (Here, I use Definition name for clarity, but I will use the DefinitionId in production)
-
-```
-TimelineBuilds
-| where Project == "public"
-| where Reason == "pullRequest"
-| where TargetBranch == "main"
-| where Result == "succeeded"
-| extend PipelineDuration = datetime_diff('second', FinishTime, StartTime) * 1s
-| project-keep Definition, PipelineDuration
-| join kind=inner (
- TimelineBuilds
- | where Project == "public"
- | where Reason == "pullRequest"
- | where TargetBranch == "main"
- | where Result == "succeeded"
- | project Definition, PipelineDuration = datetime_diff('second', FinishTime, StartTime) * 1s
- | summarize
- Bottom5 = percentile(PipelineDuration, 5),
- Top95 = percentile(PipelineDuration, 95),
- Count = count()
- by Definition
- | where Count >= 30)
- on Definition
-| where PipelineDuration between (Bottom5..Top95)
-| summarize Mean = avg(PipelineDuration), ConfidenceInterval = sqrt(2) * totimespan(stdevp(PipelineDuration)) by Definition
-```
-
-*Thanks to Nikki ([@mathaholic](https://github.com/mathaholic)) for her help with this data analysis, and her idea to use Chebyshev’s Theorem!*
-
-### Caveats
-
-#### Multimodal Distributions
-
-There are some issues with this model. First, some pipelines, like `dotnet/runtime`'s have a multimodal distribution. This means we cannot accurately predict their pipeline duration. In this case, their distribution is multimodal because their first step, `Evaluate Paths` evaluates the changes in a given PR, and runs or skips different steps of their pipeline.
-
-We will hide the estimated time and instead inform the user that their pipeline cannot be predicted as it is too variable. This will also handle the case where the range of a CI pipeline exceeds the estimated time (*e.g.* `1min ± 5min`)
-
-#### Infrastructure Outages
-
-In addition, there is the issue of AzDo, Helix, or builds being on the floor, and we still give customers an estimate, blissfully unaware of any infrastructure errors. In the Jupyter notebook, I dive into an anomaly detection model, based on Helix work item wait times trying to predict this, but the model only improves accuracy by 0.3%.
-
-
-For Helix and/or AzDo being on the floor, we will rely on our Known Issues infrastructure, and simply hide the checks when there are any critical infrastructure issues.
-
-## Risk
-
-* Will the new implementation of any existing functionality cause breaking changes for existing consumers?
- * No, this will be a new feature.
-* What are your assumptions?
- * We'll constantly maintain data in the `TimelineBuilds` table. This feature depends on it.
-* What are your unknowns?
- * See above, how do we decompose multimodal distributions and become notified when services are down?
-* What dependencies will this epic/feature(s) have?
- * `TimelineBuilds`
- * Kusto
-* Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
- * We can consume them now.
-* Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
- * Aug 12, the end of the internship.
-* Does anything the new feature depend on consume a limited/throttled API resource?
- * No.
-* Have you estimated what maximum usage is?
-  * No, but it will be the same as the Queue Insights project.
-* Are you utilizing any response data that allows intelligent back-off from the service?
- * We only query Kusto, so there is no need for back-off.
-* What is the plan for getting more capacity if the feature both must exist and needs more capacity than available?
-  * This feature won't require any additional capacity.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cpipeline-machine-learning-arcade8824.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cpipeline-machine-learning-arcade8824.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cpipeline-machine-learning-arcade8824.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/pr-failure-tagging-one-pager-core-eng-12136.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/pr-failure-tagging-one-pager-core-eng-12136.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/pr-failure-tagging-one-pager-core-eng-12136.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/pr-failure-tagging-one-pager-core-eng-12136.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,169 +0,0 @@
-## Automated GitHub tagging for failed Pull requests from Maestro++ dependency flow
-
-### Limitations:
-
-- This functionality only makes sense when the subscriptions involved are non-batched, and documentation will call this out. We only want to tag a partner team if we're fairly sure that it's worth someone from the source repo to take a look.
-- We won't be able to detangle whether the root cause of a failure is the target repo build's flakiness, an intentional breaking change from the source repo that was advertised via email, or a break from changes that lack test coverage from the source repository, so a list of these possibilities will be part of the message where source repos get tagged.
-
-See Epic: https://github.com/dotnet/core-eng/issues/12136
-
-### Stakeholders
-
-While we hope that the features added in this epic provide benefit to any "target" repo of dependency flow, the primary stakeholder currently is the .NET SDK team. They exist at the end of the metaphorical dependency-flow river, so they experience potential issues with dependency updates from every layer in the stack, possibly in a synergistic fashion (i.e. the regression requires two or more repos' changes).
-
-### Risk
-
-- What are the unknowns?
-
- Unknowns are minimal for the work as described; the main being simply "will this improve how long it takes to look at a PR failure by the source repository of the build change?"
-
-- Are there any POCs (proof of concepts) required to be built for this epic?
-
- No. This will be adding functionality to existing components that already do most of the related behaviors; after review it looks like every challenging thing done already has some component that could be augmented to support this.
-
-- What dependencies will this epic have? Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
-
- None. All work will occur in the arcade-services repo (unless we decide to store the mappings in another)
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-
- No breaking changes; the idea is simply to add logging and tagging to an existing semi-mature process.
-
-- Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
-
- Goal is to finish by end of April 2021. Slipping the date has no associated risk, just means that the SDK team and other "end-of-the-line" repos will continue to operate as they are currently for some time.
-
-### Serviceability
-
-- How will the components that make up this epic be tested?
- - Add tests to existing unit tests in the area (expanding on existing)
- - Arcade-services scenario test. (Tests using checks already exist, will be an exercise in code reuse for this somewhat similar scenario.)
-
-- How will we have confidence in the deployments/shipping of the components of this epic?
-
- Regularly run scenario and unit tests that exercise this code path always run before deployment to production.
-
-- Identifying secrets (e.g. PATs, certificates, et cetera) that will be used (new ones to be created; existing ones to be used).
-
- No new secrets will be needed by the change.
-
-- Does this change any existing SDL threat or data privacy models? (models can be found in [core-eng/SDL](https://github.com/dotnet/core-eng/SDL) folder)
-- Does this require a new SDL threat or data privacy models?
-
- No; the only PII used will be GitHub aliases / tags and this will be augmenting existing functionality.
-
-- Steps for setting up repro/test/dev environments?
-
- Same as https://github.com/dotnet/arcade-services/blob/main/docs/DevGuide.md
-
-#### Rollout and Deployment
-
-This section left blank as this will be part of an arcade-services component.
-
-### Usage Telemetry
-- How are we tracking the “usefulness” to our customers of the goals?
-
- Since there can be lots of reasons a PR is merged quickly or slowly, pure data metrics likely do not help here unless we somehow had many more data points. However, we can monitor:
- - Feedback from SDK and other end-of-the-line repo team members
- - Whether we observe incidence of an upstream repo team member taking action before the downstream team raises the issue with them.
-
-- How are we tracking the usage of the changes of the goals?
-
- - We already use Application Insights for telemetry. The idea here will be to start tracking the success rate % of PRs from a given repo to another. By simply writing down source, destination, and success of the PR build we'll be able to identify which teams flow problems out of their repositories. It should be straightforward to make a "top N list" of repos whose changes break others, and dig in to the data from there.
-
-### Monitoring
-
- Already covered by the Dependency Flow error processor and Grafana alerts.
-
-### FR Hand off
-- What documentation/information needs to be provided to FR so the team as a whole is successful in maintaining these changes?
-
- The location of mappings for Repository URL -> GitHub Tag, and instructions to update these.
-
-### Description of the work:
-
-#### Components changed:
-
-- New function: TagSourceRepositoryGitHubContacts()
- - Reads config file from below, ensure a message is created in the PR to come check out the failure. Will reuse as much as possible from existing GitHub helper functions in Pull Request Actor.
-
-- Changes to [PullRequestActor](https://github.com/dotnet/arcade-services/blob/main/src/Maestro/SubscriptionActorService/PullRequestActor.cs)
- - NonBatchedPullRequestActorImplementation changes:
- - Pass along a flag inside its overload of SynchronizeInProgressPullRequestAsync() to indicate that repo-tagging should occur (this can only happen in non-batched, since it's not feasible to detangle who broke what in a batched update)
- - This is needed to ensure that UpdatePullRequestAsync() gets called to check
- - PullRequestActorImplementation changes:
- - UpdatePullRequestAsync() would check first if it was being called inside a non-batched PR actor, and if so it would also check the existing field for the interesting state (`InProgressPullRequest.MergePolicyCheckResult == MergePolicyCheckResult.FailedPolicies`)
- - Before continuing to updating the PR with new commits it would fetch the `dependency-flow-failure-notifications.json` file described below (or just have it as a local asset in the Maestro++ service), check to see if any failure tagging is requested, and apply a comment to the issue with all unique tags which have been requested.
-
- - SynchronizePullRequestAsync already has an entry in [its switch statement](https://github.com/dotnet/arcade-services/blob/main/src/Maestro/SubscriptionActorService/PullRequestActor.cs#L424-L426), so we'd have a different actionresult added for this case.
- - For the `case MergePolicyCheckResult.FailedToMerge:` entry, we'd add a new enum value to SynchronizePullRequestResult (e.g. "InProgressCanUpdateNeedsNotification")
- - At this point SynchronizeInProgressPullRequestAsync calls the new function (TagSourceRepositoryGitHubContacts()) to ensure that a boilerplate message is pasted in with the tags from the config file.
- - Telemetry changes:
- - Send telemetry with source and destination repos whenever a non-batched PR is created.
- - Whenever we tag an issue due to failed checks, send additional piece of telemetry with the same information indicating failure
- - Could just send once at the very end, but this might miss sending check failure telemetry on PRs that get manually fixed up to pass, or are manually merged.
-
-- Changes to allow users to self-service dependency-flow failure notifications:
- - Introduce a new file, say `https://github.com/dotnet/arcade-services/blob/main/dependency-flow-failure-notifications.json` sample content:
-
-``` json
- {
- [
- // Example showing options for disabling and specifying particular channels
- {
- // Notify @dotnet/runtime-infrastructure for any runtime content coming from the .NET 6 channel
- // targeted to the Sdk and ASP.NET Core teams where PRs fail.
- "SourceGitHubRepoUrl": "https://github.com/dotnet/runtime",
- "TargetGitHubRepoUrls":
- [
- "https://github.com/dotnet/sdk",
- "https://github.com/dotnet/aspnetcore",
- ],
- "NotificationSettings": [{
- "GitHubTagsToNotify": [
- "@dotnet/runtime-infrastructure"
- ],
- "Channels": [
- ".NET 6",
- ]
- },
- // Notify a different tag for servicing branches
- {
- "GitHubTagsToNotify": [
- "@dotnet/runtime-infrastructure-servicing"
- ],
- "Channels": [
- ".NET 5",
- ".NET 3"
- ],
- // leave the ability to disable in case failures are expected
- "Enabled": false
- }
- ]
- },
- // Minimal example: always alert on all channels for PRs made from Nuget.Client -> Dotnet SDK
- {
- "SourceGitHubRepoUrl": "https://github.com/nuget/nuget.client",
- "TargetGitHubRepoUrls":
- [
- "https://github.com/dotnet/sdk"
- ],
- "NotificationSettings": [{
- "GitHubTagsToNotify": [
- "@nuget/nuget-infrastructure",
- ]
- }
- ]
- }
- ]
- }
-```
-
-#### Notes
-- All subscriptions to GitHub PR failure tagging must be OK with the "source" repo. That is, target repositories causing failure notifications for teams that do not want these notifications will have them removed. A blurb to this effect will be part of the failure notification tag boilerplate message.
-- See related feature request: https://github.com/dotnet/arcade/issues/7102
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cpr-failure-tagging-one-pager-core-eng-12136.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cpr-failure-tagging-one-pager-core-eng-12136.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cpr-failure-tagging-one-pager-core-eng-12136.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/retrying-stages-staging-pipeline.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/retrying-stages-staging-pipeline.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/retrying-stages-staging-pipeline.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/retrying-stages-staging-pipeline.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,157 +0,0 @@
-# Retrying Stages to Address Errors in Staging pipeline
-
-## What is the current process?
-
-Today we must run the entire Staging pipeline if we encounter any errors in the pipeline.
-For eg: When Package push fails, we need to rerun everything to fix the bug.
-
-There is no way to test the pipeline with fewer stages. Since most of the later stages are dependent on earlier stages that takes a lot of time to complete.
-For eg: If we want to test only the Required Validation stage, we will need to run Prep and Signing.
-
-## Problems with current process:
-1. Rerunning a single stage in the pipeline is not possible without executing the entire pipeline again.
-1. Running only the stage where changes were implemented is not feasible.
-1. Re-executing the entire process and testing consumes a considerable amount of time.
-1. Staging pipeline is complicated to understand, we need a simpler process.
-
-## Different approach for staging pipeline:
-
-| Approach | Pros | Cons |
-|----------|------|-------|
-| 1. We add some parameters in the pipeline, and make staging pipeline download the pipeline assets uploaded in the Signing stage of build with build id 12345 in the new pipeline run. Since the assets are already signed we just run required validation and publishing stage only We will need build id of the successful build. We will use azure apis to download the pipeline artifacts |
Easier to test
We can pick and choose which stages to rerun
Relatively easy to implement
Release pipeline is doing something similar |
Seems primitive and hacky |
-| 2. Pipeline should be intelligent enough, like if we rerun the pipeline with the same BarId, it should look up the data in timeline builds and if the stage was previously successful from previous runs it should just not do anything. Like here if prep ring was successful, so it would skip prep and go to signing. Signing was already successful so now it will skip this stage and just download assets from the build with build if 12345. Then eventually just run Required Validation and publishing stage |
Lot of code change in every stage
Timeline data is in Kusto and they sometime take time to show up in kusto so prone to errors(waiting can help) |
Need to capture details which build ran with which barid
Seems hacky
Adds non determinism based on time, availability of systems. |
-| 3. Create a cache to cache the Signed assets, so next time we re run the pipeline for the same barID, We look for signed assets in the cache. The assets are hashed, so if there is no change in the bits, we do not have to run that stage. So in case required validation was previously successful for the signed assets hash then skip this stage.|
Lot cleaner |
We may be adding complexity to our current infra
Have to maintain cache
Need to add an explicit override when we need to re-sign.
Does not save a lot of time in 7.0/8.0 where in-build signing reduces the time in the signing stage. |
-| 4. Split the pipeline into 2
Anything that alters the artifacts ( Signing)
Validation and publishing |
.Net 9 we are moving towards moving signing (anything that alters/creates the build artifacts is going to be moved) to main builds, so by splitting this we will align to work with .Net 9 ( which is awesome!)
Making the pipeline lot less complex
We can introduce testing infra (inject DI in the second pipeline, publish to test vs prod containers)
Makes the whole pipeline simpler
If we want to fix something in the second pipeline we don't have to rerun the first one. So fixes are and flaky test reruns are lot faster and easier. |
Have to maintain 2 pipeline
BCM work has to be reevaluated.|
-
-
-## Goal and Motivation
-
-After carefully analyzing the pros and cons of all the above approaches, we decided to go with splitting the pipeline.
-
-The proposal here is to split the Staging Pipeline into two different pipelines and add the ability to rerun stages. Thinking of this work as 2 part process.
-#### Version 1 (V1):
-1. First pipeline will contain anything that alters or generates the artifacts (Eg: Signing, SBOM generation).
-1. Second pipeline will contain Validation and publishing to various storage accounts.
-#### Version 2 (V2):
-1. Ability to add rerun stages in the Second pipeline
-
-## Stakeholders:
-1. Tomas team
-1. Release team
-
-### Current Flow
-
-```mermaid
-flowchart LR;
- prep["Prep Ring \n ~30min"] --> prep_override[Prep Ring Override]
- prep_override --> signing["Signing Ring \n ~50min"]
- prep_override --> source_code_validation["Source Code Validation \n ~40min"]
- source_code_validation --> source_code_validation_override[Source Code Validation Override]
- signing --> required_validation["Required Validation \n ~1h"]
- signing --> validation["Validation \n ~5h"]
- required_validation --> sbom_generation["SBOM Generation \n ~20m"]
- required_validation --> required_validation_override[Required Validation Override]
- validation --> validation_override[Validation Override]
- required_validation_override --> publishing_v3_signed["Publish Post-Signing Assets \n ~1h20m"]
- required_validation_override --> post_signing_publishing["Publish Signed Assets \n ~1h30m"]
- required_validation_override --> vs_insertion["VS Insertion Ring \n ~50m"]
- vs_insertion --> vs_insertion_override["VS Insertion \n Override \n "]
- vs_insertion_override --> test_team_sign_off["Waiting for \n Test team \n to Sign off \n "]
- test_team_sign_off --> staging_ring["Staging Ring \n "]
- source_code_validation_override --> staging_ring["Staging Ring \n ~1h10m"]
- staging_ring --> staging_ring_override["Staging Ring \n Override \n "]
- staging_ring_override --> sign_off_for_finalizing_release["Sign off for \nfinalizing \n the release \n "]
- sign_off_for_finalizing_release --> finalize_staging_ring["Finalize \n Staging Ring \n "]
- sign_off_for_finalizing_release --> publish_cit_validated_assets["Publish CTI \n validated assets \n 1h20m"]
- finalize_staging_ring --> approve_publishing_dotnetcs_internal["Approve \n publishing to \n dotnetcsinternal \n "]
- approve_publishing_dotnetcs_internal --> handoff_publishing_ring["Handoff \n Publishing Ring \n (dotnetcsinternal) \n "]
- sbom_generation --> sbom_generation_override[SBOM Generation Override]
-```
-
-## V1:
-### Splitting Staging Pipeline:
-
-Proposed implementation is splitting the pipeline into two pipelines.
-#### 1. First pipeline: Stage-Dotnet-Prepare-Artifacts
-The Stage-Dotnet-Prepare-Artifacts pipeline will contain stages that alter or generate new (e.g. SBOM) artifacts.
-```mermaid
-flowchart LR;
- prep["Prep Ring \n ~30min"] --> prep_override[Prep Ring Override]
- prep_override --> signing["Signing Ring \n ~50min"]
- signing--> sbom_generation["SBOM Generation \n ~20m"]
- sbom_generation--> kick_2nd_pipeline["Stage-Dotnet-Validate-Publish pipeline kick off \n ~20m"]
- sbom_generation --> sbom_generation_override[SBOM Generation Override]
-```
-#### 2. Second pipeline: Stage-Dotnet-Validate-Publish
-The Stage-Dotnet-Validate-Publish pipeline will contain validation and publishing stages.
-```mermaid
-flowchart LR;
- start["Queued \n"] --> validation["Validation ~5h"]
- validation --> validation_override[Validation Override]
- start--> source_code_validation["Source Code \n Validation \n ~40min"]
- source_code_validation --> source_code_validation_override[Source Code \n Validation \n Override]
- start--> required_validation["Required \n Validation \n ~1h"]
- required_validation --> required_validation_override[Required \n Validation \n Override]
- required_validation_override --> publishing_v3_signed["Publish \n Post-Signing Assets \n ~1h20m"]
- required_validation_override --> post_signing_publishing["Publish Signed \n Assets \n ~1h30m"]
- required_validation_override --> vs_insertion["VS Insertion \n Ring \n ~50m"]
- vs_insertion --> vs_insertion_override["VS Insertion \n Override \n "]
- vs_insertion_override --> test_team_sign_off["Waiting for \n Test team \n to Sign off \n "]
- test_team_sign_off --> staging_ring["Staging Ring \n "]
- source_code_validation_override --> staging_ring["Staging Ring \n ~1h10m"]
- staging_ring --> staging_ring_override["Staging Ring \n Override \n "]
- staging_ring_override --> sign_off_for_finalizing_release["Sign off for \nfinalizing \n the release \n "]
- sign_off_for_finalizing_release --> finalize_staging_ring["Finalize \n Staging Ring \n "]
- sign_off_for_finalizing_release --> publish_cit_validated_assets["Publish CTI \n validated assets \n 1h20m"]
- finalize_staging_ring --> approve_publishing_dotnetcs_internal["Approve \n publishing to \n dotnetcsinternal \n "]
- approve_publishing_dotnetcs_internal --> handoff_publishing_ring["Handoff \n Publishing Ring \n (dotnetcsinternal) \n "]
-```
-
-### Advantages of splitting pipeline:
-1. In .Net 9 we are going to move signing to main build. By splitting pipeline we are going to be in alignment with that plan, meaning we can retire the first pipeline when time comes and only the second pipeline will be staging pipeline then.
-1. Bug fixes and testing in validation and publishing stages are a lot faster. We do not have to wait for the build to be signed every time we make a fix to the validation/publishing stage or re-run flaky tests.
-1. We can add ability to rerun to smaller subset of stages as compared to Stage-Dotnet pipeline.
-
-### Interface between the Stage-Dotnet-Prepare-Artifacts Pipeline and Stage-Dotnet-Validate-Publish pipeline:
-
-The first pipeline Stage-Dotnet-Prepare-Artifacts (May be during the create SBOM stage or after) kicks off a build in the Stage-Dotnet-Validate-Publish pipeline. This is similar to what we have in maestro promotion pipeline. The Stage-Dotnet-Prepare-Artifacts is not dependent on the Stage-Dotnet-Validate-Publish pipeline to be completed.
-
-The second pipeline Stage-Dotnet-Validate-Publish pipeline downloads the signed build artifacts from the Stage-Dotnet-Prepare-Artifacts pipeline.
-
-One of the main reasons for adding the trigger to kick off the Stage-Dotnet-Validate-Publish from the Stage-Dotnet-Prepare-Artifacts pipeline is so that we don't have to manually kick off the Stage-Dotnet-Validate-Publish build after the first pipeline completes. Additionally the Stage-Dotnet-Validate-Publish can be kicked off manually too. It will use the BarBuildID / BuildId combination to download the signed assets from the first pipeline.
-
-## V2:
-
-Rerunning stages in Stage-Dotnet-Validate-Publish pipeline:
-
-Say Publishing Signed Assets fails and validation is successful
-1. We can add the ability to pick and choose the stages to run in the second pipeline.
-
-Here we skipped the Validation stage altogether. Similarly we can add the ability to skip other stages in the pipeline.
-
-
-```mermaid
-flowchart LR;
- start["Queued \n"] --> publishing_v3_signed["Publish \n Post-Signing Assets \n ~1h20m"]
- start --> post_signing_publishing["Publish Signed \n Assets \n ~1h30m"]
- start --> vs_insertion["VS Insertion \n Ring \n ~50m"]
- vs_insertion --> vs_insertion_override["VS Insertion \n Override \n "]
- vs_insertion_override --> test_team_sign_off["Waiting for \n Test team \n to Sign off \n "]
- test_team_sign_off --> staging_ring["Staging Ring \n "]
- staging_ring --> staging_ring_override["Staging Ring \n Override \n "]
- staging_ring_override --> sign_off_for_finalizing_release["Sign off for \nfinalizing \n the release \n "]
- sign_off_for_finalizing_release --> finalize_staging_ring["Finalize \n Staging Ring \n "]
- sign_off_for_finalizing_release --> publish_cit_validated_assets["Publish CTI \n validated assets \n 1h20m"]
- finalize_staging_ring --> approve_publishing_dotnetcs_internal["Approve \n publishing to \n dotnetcsinternal \n "]
- approve_publishing_dotnetcs_internal --> handoff_publishing_ring["Handoff \n Publishing Ring \n (dotnetcsinternal) \n "]
-```
-
-#### Risks:
-1. We are dissecting the staging pipeline, there is a chance we might be messing up publishing to correct storage containers.
-1. Since we will retire the old pipeline only after the new pipelines Stage-Dotnet-Prepare-Artifacts and Stage-Dotnet-Validate-Publish are up and running, there may be an extended period where we need to maintain extra pipelines.
-
-#### Additional information:
-1. Verify we don't duplicate publishing of assets is tracked in [issue](https://github.com/dotnet/arcade/issues/13025)
-1. Adding testing infra to Staging pipeline is tracked [here](https://github.com/dotnet/arcade/issues/13462)
-
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/secret-sweep-and-clean-core-eng-13551.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/secret-sweep-and-clean-core-eng-13551.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/secret-sweep-and-clean-core-eng-13551.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/secret-sweep-and-clean-core-eng-13551.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,486 +0,0 @@
-# Dnceng Secret Sweep n' Clean
-
-## Summary
-
-The .NET Engineering services team manages multiple resources that are
-necessary for the development, servicing and release of the .NET
-product. Several parts of the engineering and product infrastructure
-depend on secrets that allow access to sensitive resources.
-
-The Engineering services team has already done extensive work to enforce
-the security of these secrets:
-
-- Any resources that can be accessed by external contributors to the
- .NET product don\'t have access to secrets (helix public queues, PR
- builds in dnceng public instance)
-
-- All secrets are stored in a Key Vault, and rotated on a regular
- cadence, partially without human interaction via the secret manager.
-
-- Secrets can be revoked at any moment in case of a suspected breach.
-
-- All secrets required by build and release pipelines are stored in
- variable groups, with a backing key vault.
-
-- Access to Key vaults is (mostly) restricted to the Engineering
- Services team.
-
-The company-wide increased focus on Security brings the need to do some
-additional work in our secret management practices to ensure that we
-follow several key principles.
-
-- **Least privilege**: Secrets should provide only the minimum
- required level of access to sensitive resources. At the same time, a
- secret should only allow access to a very limited set of resources.
-
-- **Zero trust**: Secrets can only be accessed by resources/people who
- have a compelling need to access them. Not all secrets need to be
- accessed by every member of the team.
-
-- **Minimizing shared information**: Resources and infrastructure
- components should only access the secrets they need for their
- functioning and no others.
-
-Keeping these principles in mind, there's a need to:
-
-1. Perform a point-in-time audit of our existing secrets and
- secret-adjacent resources.
-
-2. Create new processes and guidelines on how to perform future audits
- and cleanup.
-
-3. Develop improvements to our infrastructure in such a way that we can
- uphold these principles by default when possible.
-
-## Stakeholders
-
-- .NET Engineering Services team (contact: \@dnceng)
-
-- All .NET Engineering Services partners
-
-## Risks
-
-**What are the unknowns?**
-
-- By changing the layout, scope and access level of our secrets, there
- is a risk that we will break existing repo workflows or
- infrastructure that we are not aware of. This will require providing
- guidance and alternatives to teams that are using secrets in a way
- that we didn't intend to. This is also an opportunity for
- improvement and increased visibility into these hidden workflows.
-
-- There's a risk that restricting the access level of certain secrets,
- some parts of our infrastructure will temporarily stop working while
- we uncover discrepancies between what we think a secret does, and
- how it's used. For example, we might uncover Personal Access Tokens
- (PATs) that carry more access than their names imply.
-
-**Are there any POCs (proof of concepts) that need to be built for this
-work?**
-
-Probably not. We will continue using and extending the existing secret
-manager tooling.
-
-**What dependencies will this work have? Are the dependencies currently
-in a state that the functionality in the work can consume them now, or
-will they need to be updated?**
-
-This work shouldn't depend on any other work. The audit and cleanup can
-happen at the same time as we develop new functionalities in the secret
-manager. There will be a need to update various parts of our
-infrastructure to account for any Key Vault layout changes and
-permission guidelines that come as part of this work.
-
-**Will the new implementation of any existing functionality cause
-breaking changes for existing consumers?**
-
-Yes. There is a good chance that restricting the access and scope of
-certain secrets will break existing tooling and workflows that we are
-not directly involved in, as well as places where we are not using secrets
-in the way we believe they are being used. As we both start enforcing
-that every variable group needs to be sourced by a key vault, and we
-restrict PAT scopes further, secret names will inevitably change, which
-will cause the need for changes to YAML build definitions that rely on
-the changed secrets.
-
-We will restrict variable groups to only be used by specific pipelines,
-and this will most likely require reauthorization on the first run of
-every pipeline that uses the variable group after the change.
-
-**Is there a goal to have this work completed by, and what is the risk
-of not hitting that date? (e.g. missed OKRs, increased pain-points for
-consumers, functionality is required for the next product release, et
-cetera)**
-
-This work should be able to be completed in parallel with the product
-development and the development of new features to our infrastructure,
-and any impact should be brief. However, we should aim to complete this
-work early in the .NET 7 development cycle to ensure that any new
-infrastructure and product changes can take advantage of any new
-guidelines and enhanced security that comes from this work.
-
-## Open Questions
-
-- Should we touch any product specific azure resources and key vaults?
-
-- What kind of access to resources will we need to give to different
- ICs that are not part of the dnceng team?
-
-- Is attempting to manage Azure DevOps service connections in scope
- for this epic? Managing these service connections has been a pain
- point as the personal access tokens that back them expire without
- any notification.
-
-## Components to change
-
-The components that we have identified that will need to be audited or
-updated are:
-
-### Azure Key vaults
-
-We want to make changes to the layout and access policies of our key
-vaults, and the individual secrets contained within them. The following
-subscriptions are in scope for this work:
-
-- Dotnet Engineering Services
-
-- Dnceng-InternalTooling
-
-- HelixProd
-
-- HelixStaging
-
-**High-level activities**
-
-1. Split our existing vaults in such a way that we have a separation
- between vaults that hold secrets needed to be accessed by
- automation, from vaults that hold secrets that should be accessed by
- humans. If there are any secrets identified as required by both
- types of users (such as a limited set of SAS tokens for storage
- account access), we should add them to the human accessible key
- vaults. This way we can restrict access to the automation-only key
- vaults. Some examples of each kind of secret are:
-
- 1. **Secrets required by automation only:**
-
- - Connection strings to databases
-
- - Azure storage account keys and SAS tokens
-
- - GitHub, Helix, Maestro and Azure DevOps Tokens belonging to bot
- users
-
- - Proxy feed URLs for 2.1 servicing
-
- - Aka.ms secrets
-
- - Other secrets accessed by build and signing pipeline
-
- - GitHub Application client secrets
-
- 2. **Secrets required by humans**
-
- - Credentials to bot accounts (one time password seeds and
- recovery codes, usernames, passwords)
-
- - SAS tokens for storage accounts that dnceng users don't have
- access to, and for partner team access to dnceng storage
- accounts (Helix, OSOB related).
-
-2. Split vaults that hold secrets that grant a high level of access to
- resources, from vaults that hold secrets that grant more granular
- access. Secrets like storage account keys and bot login information
- allow for the creation of other secrets. We shouldn't keep bot
- usernames and passwords in the same vaults that hold secrets
- generated from those identities, and we shouldn't hold Storage
- account keys and SAS tokens in the same vault. This grants us the
- ability to restrict user access to the more granular resources and
- tighten the access policies for secrets that grant a high level of
- permissions. Only dnceng administrators should have access to the
- broad access secrets.
-
-3. Make sure each one of our services has access to the least number of
- vaults possible. This is something we already attempt with service
- fabric clusters only having access to a single vault per
- environment, but we should ensure this is the case for all our
- infrastructure.
-
-4. Eliminate duplication of secrets where possible. There are bound to
- be scenarios where duplication is necessary, but we should opt for
- generating multiple secrets that do the same thing (such as PATs
- with duplicate scopes, SAS tokens to storage accounts), so that they
- can be rotated or revoked individually without affecting multiple
- resources or separate components of the infrastructure.
-
-### Personal Access tokens
-
-We rely on various PATs to perform operations on behalf of our bot users
-in several parts of our infrastructure across multiple GitHub and azure
-DevOps organizations. Managing these PATs has historically proven
-painful, and the impact of mismanaging them is very high, as usually our
-bots have very high levels of access to most of our infrastructure.
-
-**High-level activities**
-
-1. Make sure that PATs have only the minimum required set of
-    scopes for the functionality they perform.
-
-2. Create a sustainable process to generate Azure DevOps PATs for
- multiple organizations in such a way that 1ES automation
- ()
- will not change them underneath us.
-
-3. Make it easier to generate PATs for our bots, such that we can
- encourage the creation and management of multiple PATs with the same
- scopes as opposed to reusing the same PAT for multiple purposes.
-
-4. Ensure that every PAT used by infrastructure is performing
- operations on behalf of our bot users, and not individual team
- members.
-
-5. Make sure that every PAT is accounted for in a secret manager
- manifest so that it can be managed by automation.
-
-6. Enforce the naming convention of \ for
- existing and future PATs.
-
-### Azure DevOps Variable Groups
-
-The way that we pipe secrets from our key vaults to be used by build and
-release pipelines is through azure devops variable groups. As their name
-implies, these components can group variables in a logical collection.
-These groups, and the variables contained within them, can then be
-referenced in yaml pipelines and the classic editor.
-
-As part of this effort, we will audit every variable group in the dnceng
-organization, as well as every matching variable group that we own in
-the devdiv azure devops instance.
-
-**High-level activities**
-
-1. Ensure all variables groups that hold secrets are linked to a key
- vault. If the variable group is not owned by the engineering
- services team, work with the variable group owners to find the best
- location for a vault to store the secrets. This will mean that
- variable groups that currently mix secret and non-secret variables
- should be split into two groups.
-
-2. Change all variable groups so that they are not granted access to
- every pipeline in the Azure DevOps organization where they live, and
- instead individual pipelines should be granted access on a "as
- needed" basis.
-
-3. Audit pipelines and yaml templates to make sure they only request
- access to the minimal set of variable groups they need to work
- properly.
-
-4. Write tooling to enforce the new policies. We will need tools /
- scripts that:
-
- a. Identify variable groups that are not linked to vaults
-
- b. Identify secrets that are present in variable groups but are not
- used by any pipeline
-
-### Azure Storage Accounts
-
-The .NET engineering infrastructure relies on several Azure
-storage accounts so that our services and pipelines run properly. As
-part of this effort, we should make sure that broad access to storage
-accounts is only granted to administrators, while we improve the tooling
-needed to minimize the effects of cycling storage account keys.
-
-For the purposes of this work, we will make a distinction between
-infrastructure development and product release storage accounts.
-
-**Product release storage accounts** refer to the storage accounts
-where we upload bits that we will end up releasing to the public,
-servicing releases, as well as the source-build tarballs that we share
-with RedHat. Dnceng has limited access to these accounts, as they are
-hosted in subscriptions outside of general access. We want to focus most
-of our effort in this group of storage accounts.
-
-The storage accounts that fall in this group are:
-
-- Dotnetcli
-
-- DotnetcliMSRC
-
-- DotnetCliChecksums
-
-- DotnetCliChecksumsMSRC
-
-- Dotnetbuilds
-
-- DotnetFeed
-
-- DotnetFeedMRC
-
-**Infrastructure storage accounts** refer to storage accounts that are
-needed for the functioning of services and infrastructure owned by
-dnceng, for example the Helix service and autoscaler, and the
-Helix-machines repository.
-
-**High-level activities**
-
-1. Audit our infrastructure for places where we use storage account
- keys for access and try to replace them with SAS tokens instead.
- Update all places where it's reasonable to make such changes.
-
-2. Audit existing SAS tokens in our key vaults to make sure that they
- only have the least required access to the resources they grant
- access to, and to make sure the secret names match the access level
- the sas tokens grant.
-
-3. As specified in the Azure Key Vault section, split the vaults where
- we store storage account keys from the vaults where we store SAS
- tokens. No individuals in dnceng or the product teams should have
- access to the vault where we store the keys. In cases where these
- secrets need to be cycled, access to these vaults should be granted
- either JIT, or there should be breadcrumbs so that devs know which
- users have access to the resources.
-
-4. Cycle all keys for product release storage accounts once they are in
- their own vault.
-
-5. Create SAS tokens managed by the secret manager tooling to grant
- restricted access to individual containers in these storage
- accounts.
-
-6. Write guidance for access policies to the product release storage
- accounts. Create additional storage accounts for non-release
- workflows that have historically used other storage accounts without
- a compelling reason to do so.
-
-### Secret Manager
-
-Secret manager is the tooling we built to be able to manage secrets in
-an automated fashion. Part of the work in this effort will require
-additions to secret manager to achieve the level of enforcement and
-modularity that we want to add to our key vaults and secrets.
-
-**High-level activities**
-
-1. Improve the usability and documentation around secret manager, with
- usage examples and different examples of commands that should run
- for different scenarios. As well as detailed documentation on how to
- onboard each type of supported secret.
-
-2. Add the understanding of "dependent secrets" to secret manager. A
- dependent secret is a secret that should be regenerated once another
- secret is cycled or revoked. An example of this scenario is that all
- SAS tokens that belong to a storage account need to be regenerated
- once both keys to a storage account are cycled.
-
-3. Add the ability to automatically generate PATs through the tool
- based on metadata in the secret manifests that shows which user,
- which organizations and scopes PATs should be generated with.
-
-4. Add the ability to enforce naming conventions in secrets in the
- manifests. We can make secret manager fail pipelines if it detects a
- secret with an incorrect naming convention.
-
-5. We need to make a pass through all the existing secret manifests to
- make sure there is enough information in each one of them for secret
- types that the tool can't rotate automatically, such as aka.ms
- secrets.
-
-## Serviceability
-
-#### How will the components that make up this epic be tested?
-
-For the required changes to secret manager, we will use and extend the
-existing tests to make sure that any new scenarios don't break existing
-functionality.
-
-For the secret audit and restructuring of vaults, it is difficult to
-test whether a change to a secret will have a negative impact on where
-it's used, so we will rely on a deployment strategy that minimizes the
-impact of any scope or permission changes we perform. For secrets that
-are used inside our different services and infrastructure, we will
-depend on the test coverage of the different services to make sure that
-changes to secrets do not cause breaking changes to the services.
-
-#### How will we have confidence in the deployments/shipping of the components of this work?
-
-Secrets used by our services will be tested as part of the service
-deployments themselves. Changes to secrets used by pipelines will be
-tested by running the pipelines that use them.
-
-## Rollout and Deployment
-
-### How will we roll this out safely into production?
-
-Changes to secret manager will rely on running the new functionality
-against the staging versions of the manifests for the different
-services. There is usually some discrepancy between the staging and
-production secrets but making sure the staging services still work with
-updated manifests is a good start.
-
-### How often and with what means will we deploy this?
-
-Changes to secret manager will be deployed weekly as part of the
-arcade-services deployments. Changes to the layout of vaults and secrets
-themselves will need to be deployed in stages:
-
-**For new vaults/secrets created as part of this epic**:
-
-1. Create copies of the existing secrets in their new locations
-
-2. Inform partners that we will be working on a particular set of
- secrets, and to expect some disruption while we make the switch
-
-3. Change references to the secrets to pull from their new locations
-
-4. Ensure the secrets work when pulled from the new locations
-
-5. Delete the old secrets / vaults.
-
-**For existing secrets that lose some privilege**
-
-1. Inform partners that changes to secrets are going to be performed,
- and to let us know if they see any of their workflows affected in a
- negative way
-
-2. Monitor the pipelines and services that use the secrets that got
- rescoped to make sure they are still functioning.
-
-### What needs to be deployed and where?
-
-Code changes to secret manager will be deployed via arcade-services,
-where the tool lives.
-
-New key vaults will be deployed to the dnceng owned subscriptions,
-depending on where the services that use the vaults live.
-
-### What are the risks when deploying?
-
-There are multiple risks when trying to make changes to secrets:
-
-- Restricting access to storage accounts may break some teams'
- workflows. We should work with affected teams to provide guidance
- for how to securely access the resources they were previously used
- to accessing without restriction.
-
-- Changing the scopes of PATs has the risk of breaking infrastructure
- that assumed the PATs had undocumented scopes. We should regenerate
- any PATs and secrets with the proper scopes when this happens.
-
-- Changes to pipeline access for variable groups will need to be
- accompanied by manual authorization of every pipeline that uses the
- variable group. We should write instructions for the entire dnceng
- team, so they know how to evaluate and perform this authorization.
-
-## FR Handoff
-
-### What documentation/information needs to be provided to FR so the team is successful in maintaining these changes?
-
-Part of the epic work involves better documentation and guides for the
-usage of secret manager. We will also write instructions for any
-potentially disruptive changes to secret access and how to deal with
-them.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Csecret-sweep-and-clean-core-eng-13551.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Csecret-sweep-and-clean-core-eng-13551.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Csecret-sweep-and-clean-core-eng-13551.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/stateless-pools-cost-analysis.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/stateless-pools-cost-analysis.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/stateless-pools-cost-analysis.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/stateless-pools-cost-analysis.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,205 +0,0 @@
-# Stateless Pools - A Cost Analysis
-
-## Purpose
-In order to determine the costs of switching from stateful pools to stateless ones, the following study was conducted.
-Switching to stateless pools will resolve bug [dotnet/core-eng#14683](https://github.com/dotnet/core-eng/issues/14683), which
-involves machines running out of disk space due to multiple runs on the same machine. 1ES has informed us that the working
-directories on stateful machines will not be cleaned between runs and that we should switch to stateless pools in order to
-avoid this problem.
-
-# Time Cost
-
-## Methodology
-Three pipelines were examined as part of this study: runtime (ID 686), runtime-dev-innerloop (ID 700), and runtime-staging (ID 924).
-These particular pipelines were picked because they could all be triggered via a single runtime PR.
-To determine a baseline for the stateful 1ES pools, runs between 8 Oct 2021 and 22 Oct 2021 in the TimelineBuilds table in Kusto were queried.
-Specifically, this study only concerned itself with runs which met the following criteria:
-* _Successful_: in order to make sure builds were as similar as possible, all failing runs were excluded from the results
-* _Main_: once again for the purpose of consistency, only runs targeting the `main` branch were used
-* _Inlier_: builds which took longer than five hours to complete were excluded from the dataset in interest of eliminating outliers
-
-To acquire the data for the stateless pool, 10 runs of these pipelines were triggered manually targeting the
-NetCore-Public-Int pool, which was manually set to be stateless and capable of scaling out to 300 machines.
-The sample size for these runs is necessarily much smaller than the baseline due to time constraints. The runs were
-triggered on 22 Oct 2021 and 25 Oct 2021.
-
-BuildIds of stateless runs are as follows:
-* runtime: `(1436605, 1436874, 1437134, 1439516, 1439517, 1439738, 1439739, 1439773, 1439774, 1439775)`
-* runtime-dev-innerloop: `(1436606, 1436862, 1437004, 1437087, 1439512, 1439618, 1439703, 1439745, 1439776, 1439777)`
-* runtime-staging: `(1436608, 1436875, 1437086, 1439514, 1439545, 1439779, 1439781, 1439782, 1439783, 1439784)`
-
-### Caveats and Limitations
-
-A few notable differences exist between our two samples:
-* The sample size differs massively. A large sample size can be obtained for the baseline and it was, but the smaller sample
-size for the delta inherently makes conclusions from that data less reliable.
-* The baseline data, while having a large sample size, contains runs over many different commits. While they are likely
-still similar enough to be useful for analysis, this differs from the delta data which contains runs over only a single
-commit.
-* While many different jobs were being run all at once during the delta data, this still does not reflect realistic usage
-in prod.
-* Not every run in the delta data was 100% successful -- while none presented a catastrophic failure, several had minor
-failures due to test flakiness.
-* This study still only reflects the usage of one (albeit very large) repo.
-
-### Queries Used
-To determine average & standard deviation:
-```
-TimelineBuilds
-| where DefinitionId == [def] and QueueTime > datetime(10/8/2021) and QueueTime < datetime(10/22/2021) and Result == "succeeded" and TargetBranch == "main"
-| extend BuildTime = StartTime - QueueTime
-| where BuildTime < 5h
-| distinct *
-| summarize avg(BuildTime), totimespan(stdev(BuildTime))
-```
-
-To obtain count data bucketed in fifteen minute intervals:
-```
-TimelineBuilds
-| where DefinitionId == [def] and QueueTime > datetime(10/8/2021) and QueueTime < datetime(10/22/2021) and Result == "succeeded" and TargetBranch == "main"
-| extend BuildTime = StartTime - QueueTime
-| where BuildTime < 5h
-| distinct *
-| summarize count() by bin(BuildTime, 15m)
-| order by BuildTime asc
-```
-
-To obtain percentile data:
-```
-TimelineBuilds
-| where DefinitionId == [def] and QueueTime > datetime(10/8/2021) and QueueTime < datetime(10/22/2021) and Result == "succeeded" and TargetBranch == "main"
-| extend BuildTime = StartTime - QueueTime
-| where BuildTime < 5h
-| distinct *
-| summarize count() by bin(BuildTime, 15m)
-| summarize percentilesw(BuildTime, count_, 50, 75, 95)
-```
-
-To obtain job start time:
-```
-TimelineBuilds
-| where DefinitionId == [def] and QueueTime > datetime(10/8/2021) and QueueTime < datetime(10/22/2021) and Result == "succeeded" and TargetBranch == "main"
-| extend BuildTime = FinishTime - QueueTime
-| where BuildTime < 5h
-| distinct *
-| summarize arg_max(FinishTime, *) by BuildId
-| join kind=inner (TimelineRecords
- | where Order != 0
- | where strlen( Path ) == 11
- | where WorkerName has "NetCore1ESPool"
- | summarize arg_max(FinishTime, *) by RecordId, BuildId ) on BuildId
-| extend JobStartTime = StartTime1 - QueueTime
-| summarize count(), min(JobStartTime), avg(JobStartTime), totimespan(stdev(JobStartTime))
-```
-
-To obtain job percentile data:
-```
-TimelineBuilds
-| where DefinitionId == [def] and QueueTime > datetime(10/8/2021) and QueueTime < datetime(10/22/2021) and TargetBranch == "main"
-| extend BuildTime = FinishTime - QueueTime
-| where BuildTime < 5h
-| summarize arg_max(FinishTime, *) by BuildId
-| join kind=inner (TimelineRecords
- | where Order != 0
- | where strlen( Path ) == 11
- | where WorkerName has "NetCore1ESPool"
- | summarize arg_max(FinishTime, *) by RecordId, BuildId ) on BuildId
-| distinct *
-| extend JobStartTime = StartTime1 - QueueTime
-| summarize count() by bin(JobStartTime, 1m)
-| summarize percentilesw(JobStartTime, count_, 50, 75, 95)
-```
-
-## Results
-The percentiles indicate the percentage of builds that finish in less than the given time (bucketed in 15 minute increments for build times
-and one minute increments for job start times).
-
-### runtime
-
-#### Build Times
-| **Metric** | **Baseline** | **Delta** |
-|-------------|--------------|-----------|
-| Sample Size | 154 | 10 |
-| Mean | 1:52:48 | 2:42:32 |
-| StDev | 0:40:56 | 0:32:48 |
-| 50th %ile | 1:45:00 | 2:45:00 |
-| 75th %ile | 2:15:00 | 3:15:00 |
-| 95th %ile | 3:30:00 | 3:30:00 |
-
-#### Job Start Times
-| **Metric** | **Baseline** | **Delta** |
-|-------------|--------------|-----------|
-| Sample Size | 11186 | 1122 |
-| Minimum | 0:02:17 | 0:03:33 |
-| Mean | 0:18:53 | 0:29:03 |
-| StDev | 0:15:12 | 0:20:01 |
-| 50th %ile | 0:14:00 | 0:17:00 |
-| 75th %ile | 0:31:00 | 0:50:00 |
-| 95th %ile | 0:41:00 | 1:00:00 |
-
-### runtime-dev-innerloop
-
-#### Build Times
-| **Metric** | **Baseline** | **Delta** |
-|-------------|--------------|-----------|
-| Sample Size | 552 | 10 |
-| Mean | 0:57:58 | 1:02:09 |
-| StDev | 0:09:11 | 0:04:32 |
-| 50th %ile | 1:00:00 | 1:15:00 |
-| 75th %ile | 1:15:00 | 1:15:00 |
-| 95th %ile | 1:15:00 | 1:15:00 |
-
-#### Job Start Times
-| **Metric** | **Baseline** | **Delta** |
-|-------------|--------------|-----------|
-| Sample Size | 3865 | 70 |
-| Minimum | 0:00:19 | 0:03:15 |
-| Mean | 0:03:56 | 0:11:33 |
-| StDev | 0:04:14 | 0:03:39 |
-| 50th %ile | 0:03:00 | 0:12:00 |
-| 75th %ile | 0:06:00 | 0:14:00 |
-| 95th %ile | 0:11:00 | 0:18:00 |
-
-### runtime-staging
-
-#### Build Times
-| **Metric** | **Baseline** | **Delta** |
-|-------------|--------------|-----------|
-| Sample Size | 196 | 10 |
-| Mean | 1:17:38 | 3:15:24 |
-| StDev | 0:49:57 | 0:51:29 |
-| 50th %ile | 1:00:00 | 3:30:00 |
-| 75th %ile | 2:15:00 | 4:15:00 |
-| 95th %ile | 2:45:00 | 4:15:00 |
-
-Unfortunately, runtime-staging might not be the most useful pipeline for drawing conclusions as the build time
-distribution appears bimodal which means the mean build time is not as helpful a measurement.
-
-#### Job Start Times
-| **Metric** | **Baseline** | **Delta** |
-|-------------|--------------|-----------|
-| Sample Size | 746 | 80 |
-| Minimum | 0:01:46 | 0:03:44 |
-| Mean | 0:12:48 | 0:20:43 |
-| StDev | 0:04:19 | 0:10:23 |
-| 50th %ile | 0:13:00 | 0:13:00 |
-| 75th %ile | 0:16:00 | 0:16:00 |
-| 95th %ile | 0:20:00 | 0:19:00 |
-
-## Conclusions
-
-While it is difficult to fully attribute one hundred percent of the build time increases to the stateless pools (especially with the delta
-data having a larger standard deviation in the runtime job start data and both the runtime-staging data sets), there is still a marked
-increase in both the build time and the job queue time for all observed pipelines.
-
-Of particular note is the runtime-dev-innerloop pipeline, which has very tight data sets for both the baseline and the delta. With lower
-standard deviations for both builds and jobs, we saw mean build time increase by five minutes and mean job start time increase by
-**nearly eight minutes**. In worst-case scenario runs, such an increase would translate to very high increases in overall build time,
-which may be what is reflected in the runtime and runtime-staging mean build time increases.
-
-Given this data, it is the opinion of the author that stateless pools do not represent a reasonable alternative to stateful pools with
-workspace cleanup.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cstateless-pools-cost-analysis.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cstateless-pools-cost-analysis.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cstateless-pools-cost-analysis.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/telemetry-monitoring-for-android-and-apple-platforms-13607.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/telemetry-monitoring-for-android-and-apple-platforms-13607.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/telemetry-monitoring-for-android-and-apple-platforms-13607.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/telemetry-monitoring-for-android-and-apple-platforms-13607.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,303 +0,0 @@
-# Monitoring for Android and Apple platforms
-
-As part of [#10420](https://github.com/dotnet/core-eng/issues/10420), we started supporting Android and Apple mobile platforms in Helix.
-These new on-prem devices are different in nature to the hardware we were managing so far.
-The devices (and simulators) can "break" in ways we are unable to detect at the moment, and thus a new approach to monitoring these devices is needed.
-
-**The goal of this design document** is to capture our plan for
-- why, how, when and what telemetry to collect,
-- what alerting to create based on this telemetry.
-
-## Motivation
-
-The main goal is to learn fast about mobile devices in some failed state such as being unable to install/run an application.
-Currently, we are unable to achieve this because we don't have enough data around the singular operations (XHarness commands) we perform with the devices.
-Furthermore, we don't have data about the big picture.
-
-To mark a mobile device as faulty, we need more than a single failing operation as it is hard to decide based on one data point.
-The reason is that for some groups of problems we need a margin for cases where an operation fails but it's a user error that we just couldn't distinguish from an infra issue.
-For example, if we spot a single installation failure the cause can be an app that was badly built and fails to install, which is a user error.
-However, if we see a high percentage of failures over time, probably the device is the faulty part (memory is full, emulator is hanging..).
-In the large amount of data we will have (tens of thousands of operations per day), we will be able to account for these and alert more reliably.
-This is the reason we need to collect and store data about a series of operations and alert based on the whole.
-
-## Stakeholders
-
-- **.NET Engineering Services** - team servicing the Helix devices
-- **DDFUN** - team managing the hardware - in future, they might be involved in the alert response process
-
-## Scope
-
-The mobile platforms that are in scope of this work are:
-- iOS devices (iPhones)
-- AppleTV devices
-- Android devices
-- Ubuntu Android queues with Android emulators
-- OSX queues with Xcode and Simulators
-
-The overview of the queues and Mobile devices in Helix can be found [here (Mobile devices for .NET testing)](https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/107/Mobile-devices-for-.NET-testing).
-
-## Alerts
-
-The main goal is to learn about device failures such as the inability to find the device or to install/run an application there.
-This will be achieved by creating Grafana alerts.
-There are several alerts we want to fire that we know about already.
-These events are usually signaled by XHarness CLI exiting with a specific return code.
-Most of these events are already captured in the [wrapper scripts in Helix SDK](https://github.com/dotnet/arcade/blob/e6abc186425e675e0f201acc1c33e183a0e51c86/src/Microsoft.DotNet.Helix/Sdk/tools/xharness-runner/xharness-runner.apple.sh#L154-L181) and currently we only perform work item retry and reboot the machine, hoping the work item will be dealt with on a healthy machine.
-The problem with this approach is that broken machines stay broken silently.
-The monitoring will help us learn about these cases.
-
-Examples of these event are:
-- HW device is not found on a queue with HW devices – this usually means the device is stuck after a reboot or similar.
-- Apple Simulator is not found while we are not requesting a specific runtime version (we target `ios-simulator-64` for example, which should always be available, whereas `ios-simulator-64_14.2` might not).
-- Android emulator is not available on a queue where we expect it to be running.
-- Long taking operations - we see this for iOS simulators which leak memory/CPU which manifests as the installation of applications slowing down.
-- Low SD card / internal storage disk space - root cause of installation failures in Android.
-
-### Implementation plan for alerts
-
-**Stage 1**
-At first, we will focus on detecting fatal errors when they happen such as "device is not visible".
-These are usually easy to detect since we will get almost a 100% failure rate once a device goes into this state.
-We can detect this over a short period of time and alert.
-The customer won’t be impacted as much thanks to the retry/reboot we already do so this is transparently dealt with.
-It is important for us to know as these things now happen silently and we never learn about them.
-We will be able to take the device offline and investigate it.
-
-**Stage 2**
-As we gain more experience and investigate devices in broken states, we will learn to collect new metrics that can be used to predict breaks before they happen and avoid customer impact.
-Examples can be:
-- Long taking operations - we see this for iOS simulators which leak memory/CPU which manifests as the installation of applications slowing down.
-- Low SD card / internal storage disk space - root cause of installation failures in Android.
-
-**Stage 3**
-For some events, we already self-heal by doing a retry/reboot and thus we might not need an alert.
-We can, however, be notified about this, so that we know when and how often these things happen.
-This stage can introduce additional events as described above.
-These are rather "nice to have" so we can consider the business need later and potentially omit this stage.
-
-## Challenges
-
-Every mobile device is connected via a USB cable to a host machine which acts as a Helix agent - Helix client runs on the host machine only, never the device.
-In contrast to the other platforms, the host machines are not very interesting to us; we care about the devices.
-Since the Helix client always runs on host machines and the devices are not visible from the Helix perspective, the heartbeat mechanism won't detect bad states of these devices.
-
-Furthermore, it is quite hard to determine the health of these devices for several reasons:
-- We need specialized tooling to find the devices (XHarness, ADB, mlaunch).
-- This tooling needs to be updated frequently (to react to new versions of iOS, Android...).
-- The operations that gather device information can take even longer than the overall runtime of a large portion of work items we run today that finish in several seconds (e.g., iOS Simulator BCL tests).
-- It is difficult to tell a bad state as some of the problems do not manifest until we try to perform certain actions with the device, e.g., a device is locked but installation succeeds, only trying to run the app will fail.
-
-We explored several approaches but they had several drawbacks which would make them hard to implement/maintain:
-
-- Adding a monitoring command to the XHarness CLI and calling it at the beginning and at the end of the job.
- - It would add sizable time to all the work items.
- - We don't know which device/emulator will be used until we read the application metadata and only after then we look for the appropriate device.
-  - We can only detect some problems when we try to install/run applications on the devices, which wouldn't work for this command either. We care about results of some of the operations more than the state of the device because we just don't know the state is corrupted until some operations fail.
- - The user can call many XHarness commands as part of a single Helix work item - they can install the app once and then run it several times over with different parameters. We wouldn't know when the state got corrupted.
-
-- Adding a monitoring module to the machines that would scan the environment periodically and report back.
- - Shares some of the drawbacks from above.
- - Would be hard to maintain/update together with all of the dependencies.
- - Might interfere with currently running Helix work item.
-
-## Architecture & design
-
-> **TLDR:** XHarness will collect diagnostics data during execution and Helix SDK will send them to an Application Insights account as custom metrics so that Grafana can alert based on it.
-
-### Affected components:
-- **XHarness CLI** - tool used to run the Android/Apple tests ([`dotnet/xharness`](https://github.com/dotnet/xharness))
-- **Helix SDK** - MSBuild targets that enable creation of XHarness Helix workloads ([`dotnet/arcade`](https://github.com/dotnet/arcade/blob/main/src/Microsoft.DotNet.Helix/Sdk/tools/xharness-runner/Readme.md))
-- **XHarness workload scripts** - wrapper scripts that execute the commands supplied by the user ([`dotnet/arcade`](https://github.com/dotnet/arcade/blob/main/src/Microsoft.DotNet.Helix/Sdk/tools/xharness-runner/))
-- **Grafana dashboards** - definitions of monitoring charts and alerts ([`dotnet/arcade-services`](https://github.com/dotnet/arcade-services/tree/main/src/Monitoring))
-
-### Data collection process
-
-Based on the requirements and the challenges we face we can collect the data in the following way:
-
-1. We will add a diagnostics mode for every command of the XHarness CLI. There will be a new option and XHarness will also accept this as an environment variable.
-2. As the command runs in this mode, it will collect diagnostics data (i.e., create a diagnostic file) with information about which operation was running and how it ended. It will note down which device was used and other useful statistics.
-3. Since we control the execution of user's commands via the Helix SDK wrapper scripts, we can set the environmental variable at the beginning to execute always in the diagnostic mode. We can then collect the results at the end.
-4. We will send the diagnostics to Application Insights (later to Kusto) at the end of the Helix work item. We only need to make sure we have enough time (few seconds at most) in the Helix work item to send the data after user's commands are finished. However, since we control the execution of those, we can time-constrain them and leave a buffer for ourselves.
-
-### Data storage
-
-To store the data, we will use an already existing Application Insights account [helix-client](https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/68672ab8-de0c-40f1-8d1b-ffb20bd62c0f/resourceGroups/helixinfrarg/providers/microsoft.insights/components/helix-client-prod/overview) and upload the diagnostics data there as a [custom metric](https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-custom-overview).
-To send the data, we will need the API key and then send a request to Azure.
-We have code and infrastructure for this in place already [in the Helix client (appinsights.py)](https://dev.azure.com/dnceng/internal/_git/dotnet-helix-machines?path=%2Fresources%2Fhelix-scripts%2Fhelix%2Fappinsights.py).
-No SDL / threat model changes would be necessary for this.
-
-The metric will have a static name common for all events generated for these purposes such as `XHarnessOperation`.
-The value will be the exit code of the operation and the rest of the properties will go to **customDimensions** (an assorted dictionary).
-We can then query the data points like in the following example:
-
-```sql
-customMetric
-| where name == "XHarnessOperation" and value != 0
-| where customDimensions.platform == "android"
-| where customDimensions.command == "install"
-```
-
-In a later stage, we will migrate to Kusto and send the telemetry together with Helix job events that we are sending already.
-Currently, we are storing the events in the `Metrics` table which also has its own `CustomDimensions` column.
-We will decide at the time of implementation whether we will create our own table or re-use this existing one, it is an implementation detail.
-The queries for AI and Kusto are very similar, using the same language so it will be quite effortless to migrate the charts and alerts.
-
-### Moving to Kusto
-
-In the design stage, we have come to a conclusion that a follow-up effort will be to generate some more rigorous reports about the mobile devices and their reliability.
-For this, Kusto is more convenient place to store the data because of reliability and linkability with other job data.
-The reports are a new requirement that wasn't originally part of the epic and since we need to still support .NET 6, we are on an aggressive schedule.
-Because almost no additional work is introduced when switching to Kusto and also for reasons stated below, we will use Application Insights to store the data at first and then move to Kusto in a second iteration:
-
-- Kusto changes are more complicated as the data travels to Kusto through EventHub and an SQL table (handled by the Helix service). This means they require Helix service rollout and then Helix machines rollout (for Helix client) so it will take time to get them out.
-- Application Insights only require a PR in Arcade and thus are a better tool for iterating in case we don't get everything right the first time.
-- With AI, we can have the data available immediately and thus iterate faster with graphs/alerts to see if we missed something, no release cycle needed.
-- We can have data while we work on changes needed for Kusto already and service the platforms to better accommodate the .NET 6 schedule.
-- No extra work needs to be done when choosing AI, everything is in place already.
-- No additional work is introduced when switching to Kusto, only Kusto specific work that needs to be done anyway, we will just use different Python class to send the same data to a different place (from Helix perspective).
-
-### Collected data
-
-It is important that XHarness CLI stays Helix agnostic as it is used by other teams in scenarios not related to Helix at all.
-This means that XHarness CLI will only collect data related to the operations it performs on the device.
-We will then enrich the diagnostics XHarness data with Helix environment specifics that will help us identify problematic machines.
-
-**XHarness CLI data:**
-- Platform (android/apple)
-- Executed command (install/test/run...)
-- Exit code
-- Duration of the command
-- Emulator/Simulator ID (can also be architecture + OS/API version string)
-
-As we continue to identify new properties of the devices that can help prevent/detect problems, we can extend these data points or add new metrics.
-We can also only choose to report some in case of unsuccessful operations only.
-
-**Helix SDK data:**
-- Machine name
-- Queue name
-
-There are other data points we might choose to collect (XHarness CLI version, Helix work item friendly name…) but we are constrained by some limits set for custom metrics.
-
-**Example data**
-
-Example data for 1 run that uses the granular XHarness operations (we also have `test` command that performs all `install`, `just-test` and `uninstall`) can look something like this:
-
-| Metric | Value | Cloud role | Cloud instance | CD.platform | CD.command | CD.target |
-|---------------------------|------------------|----------------------------|----------------|-------------|------------|-----------------|
-| XHarnessOperation | 0 (SUCCESS) | osx.1015.amd64.iphone.open | DNCENGMAC049 | apple | install | ios-device 14.4 |
-| XHarnessOperationDuration | 40 (sec) | osx.1015.amd64.iphone.open | DNCENGMAC049 | apple | install | ios-device 14.4 |
-| XHarnessOperation | 1 (TESTS FAILED) | osx.1015.amd64.iphone.open | DNCENGMAC049 | apple | just-test | ios-device 14.4 |
-| XHarnessOperationDuration | 150 (sec) | osx.1015.amd64.iphone.open | DNCENGMAC049 | apple | just-test | ios-device 14.4 |
-| XHarnessOperation | 0 (SUCCESS) | osx.1015.amd64.iphone.open | DNCENGMAC049 | apple | uninstall | ios-device 14.4 |
-| XHarnessOperationDuration | 4 (sec) | osx.1015.amd64.iphone.open | DNCENGMAC049 | apple | uninstall | ios-device 14.4 |
-
-*\* CD means Custom Dimensions, just the object is expanded into separate columns*
-
-The `Value` column for `XHarnessOperation` is the exit code of XHarness and number of seconds for `XHarnessOperationDuration`.
-From these data points, we will be able to gather the success rate of each operation per every machine.
-We can be strict for alerting based on some exit codes and more benevolent in others:
-- `DEVICE_NOT_FOUND` results on machines in a queue with devices (not emulators) is probably an error we want to know about fast as it means the device is turned off.
-- `INSTALLATION_FAILED` for Android devices can be the application's fault (bad app) but can also mean low storage space, so we can have lower expectations but still want to be alerted about some levels of dropped SLA on a device.
-
-### Metric limits and quotas
-
-There are some [limits set for Application Insights custom metrics](https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-custom-overview#quotas-and-limits):
-
-- Maximum of 10 custom dimensions per metric
-- Limit of 50,000 total variations of values stored in the custom dimensions
-
-The second limit means that we need to consider the cardinality of each custom dimension and multiply them. This number must not be larger than 50,000.
-The cardinalities of proposed dimensions are:
-
-| Dimension | Values | # of values |
-|------------------|----------------------------------------------------------|:-----------:|
-| Platform | android/apple | 2 |
-| Executed command | test, run, just-test, just-run, install, uninstall | 6 |
-| Test target | Android – 10 API versions, 32/64 bit – roughly 20 values | |
-| | Apple – iOS/tvOS, OS version – roughly 30 values | 50 |
-
-We don’t need any other dimensions because:
-- Exit code will be the value of the metric
-- Machine name and queue name are stored in the metric’s `cloud_RoleName` and `cloud_RoleInstance` fields
-- Simulator vs device can be deduced from queue name
-- Device ID can be deduced from machine name
-
-Altogether, we’re looking at 600 variations which gives us space to grow still.
-There are also other fields in the metrics we could probably use such as `client_OS`.
-Nonetheless, if we choose to collect stats such as free RAM, we should consider adding it as a new metric and keeping its custom dimensions same.
-
-### Data volume & estimated costs
-
-The [metrics pricing](https://azure.microsoft.com/en-us/pricing/details/monitor/) doesn't include custom dimensions yet since they are in preview still.
-The metrics themselves cost $0.258/MB with first 150 MB per month free.
-Each metric is considered 8B so we have 18.75 million metric data points for free.
-
-Currently, the AI account receives 1,500,000 data points per day which amounts to 2-3 GB with 100 GB being the daily volume limit.
-The current monthly cost of the account is 170 USD.
-
-The new metrics we want to collect will be generated by around 70,000 XHarness operations per day which amounts to 4-5 USD per month based on these calculations (70000 data points * 30 days * 8 bytes / 0.258 USD per MB), possibly less if we hit the higher volume band which has lower rates.
-
-## Feature rollout plan
-
-This feature will be on-demand and users of Helix SDK will be able to turn it on for their Helix jobs by setting an MSBuild property.
-We can experiment this way by spawning single tests jobs from the local environment.
-After the feature is code-complete:
-
-- We will start by turning the feature on in **dotnet/xharness** where we have few runs per day (lower tens of metric data points).
-- We will continue by rolling it out gradually for the BCL tests in **dotnet/runtime**. We can do it test suite by test suite, queue by queue, platform by platform. The property can be conditioned in MSBuild easily as part of a **dotnet/runtime** pull request. In case of problems (we start hitting some limits), we can dial it back by another **dotnet/runtime** PR.
-- We will finalize the rollout by adding it to the runtime tests in **dotnet/runtime** (which requires them to start using the Helix SDK properly – pending work on their part).
-- We will make the feature be opt-out and on by default in the Helix SDK.
-
-To make a change to the system, we need to change XHarness / Helix SDK and let the dependency updates flow to **dotnet/runtime**.
-
-## Risk
-
-- What are the unknowns?
- *We only estimated the volume and variety of the data stored in Application Insights based on already existing data. We might need to create a separate account in case we need more custom dimensions. We however have control over this because we can roll out platform by platform.*
-- Are there any PoCs required to be built for this epic?
- *No, this work will be easily testable before put into production.*
-- What dependencies will this epic have? Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
- *The dependencies (XHarness, Helix SDK, Grafana) are in place and will only be extended.*
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
- *No changes, just new functionality which is internal to our team only so no consumer impact.*
-- Is there a goal to have this work completed by, and what is the risk of not hitting that date?
- *There is no hard deadline. Having this delivered will have impact on the smoothness of work on .NET 6 for the mono teams.*
-
-## Serviceability
-
-- How will the components that make up this epic be tested?
 *We have unit tests and E2E tests for both places with code changes (XHarness CLI, Helix SDK). We can verify that the data is flowing to Grafana before merging each change as we have E2E tests for XHarness and we can query by machine. We are unfortunately unable to have some sort of E2E test that makes sure Helix client changes don't break the Helix SDK integration. We would need to have the newest Arcade and Helix SDK in Helix Machines. This seems very complicated to do and may be a bad ROI.*
-- How will we have confidence in the deployments/shipping of the components of this epic?
- *We can test everything in PRs and roll out gradually before merging.*
-- Identifying secrets (e.g. PATs, certificates, et cetera) that will be used (new ones to be created; existing ones to be used).
- *Application Insights API key is already in place, also used on public queues so considered not a secret*
-- Does this change any existing SDL threat or data privacy models?
- *No, we already send data from the Helix client to the same AI account*
-- Steps for setting up repro/test/dev environments?
- *XHarness CLI can be run locally as is. Helix SDK integration tests can be triggered manually from `dotnet/xharness` and `dotnet/arcade` by devs.*
- *We will have a staging Application Insights account.*
-
-## Rollout and Deployment
-- How will we roll this out safely into production?
- *We will feature switch this via an MSBuild property in client repos (dotnet/xharness with low traffic to begin with).*
-- How often and with what means we will deploy this?
- *Rollout process for all artifacts is already in place.*
-- What needs to be deployed and where?
- *XHarness CLI diagnostics functionality and Helix SDK features will be delivered via Maestro updates.*
- *Alerts and charts will be deployed to Grafana using the regular `arcade-services` rollout.*
-- What are the risks when doing it?
- *No risks as we will feature switch this inside of a PR.*
-- What are the dependencies when rolling out?
- *None*
-
-## FR Hand off
-
-Documentation and FR hand off are subsequent goals of the parent epic and will be handled separately:
-- All new alerting should be actionable with links to documentation.
-- Team will be educated on how to service the new platforms.
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Ctelemetry-monitoring-for-android-and-apple-platforms-13607.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Ctelemetry-monitoring-for-android-and-apple-platforms-13607.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Ctelemetry-monitoring-for-android-and-apple-platforms-13607.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/validation-notifications-arcade7299.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/validation-notifications-arcade7299.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/validation-notifications-arcade7299.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/validation-notifications-arcade7299.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,84 +0,0 @@
-# Notifications For Validate-DotNet
-
-## Summary
-
-For the results of the post-build/nightly validation pipeline to be actionable, repo owners or their representatives need to be notified of failures when they occur. We propose to use [BuildMonitor](https://dev.azure.com/dnceng/internal/_wiki/wikis/DNCEng%20Services%20Wiki/185/BuildFailureManagement) to monitor the results of the validation pipeline, and notify repo owners of the failures in their runs based on the tags associated with each build by opening issues in the repositories and adding the repo's 'Area-Infrastructure' label. With these notifications, we will be able to evangelize Validate-DotNet as the supported post-build validation platform, moving away from running validation in official builds completely. For Stage-DotNet, we can also open issues in core-eng, that we can tag with the Release team stakeholders who would then pass the issue on to the product team responsible for any failures.
-
-## Stakeholders
-
-- .NET Core Engineering Services team (contact: @dnceng)
-- .NET Core Release team (contact: @leecow)
-- All product teams that currently use, or will in the future use, Validate-DotNet for their validation needs
-
-## Risk
-
-- What are the unknowns?
-
- The main unknown is how to determine who, for each repository, should be notified on each issue that BuildMonitor will open. We will start by simply adding the repos' infrastructure label to the issues that are created, and we can update that to include an assignee if it becomes desired.
-
-- Are there any POCs (proof of concepts) required to be built for this work?
-
- No, as this work will simply extend BuildMonitor to be able to differentiate builds based on tags, as well as open issues in repositories other than core-eng.
-
-- What dependencies will this epic have? Are the dependencies currently in a state that the functionality in the epic can consume them now, or will they need to be updated?
-
- This work depends on the existing BuildMonitor project. This project will need to be updated, but the work is minimal and known.
-
-- Will the new implementation of any existing functionality cause breaking changes for existing consumers?
-
- Build Monitor will have to be extended so that we can open issues in multiple repositories. Today, BuildMonitor takes a single Issues definition, when we will need to specify multiple. Additionally, we will need a way to map monitors to the repos that they will open issues in. We should be able to do this in a way that does not break current functionality, but it may be a breaking change.
-
-- Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
-
- The goal is to have this work completed by June 2021. There is no risk associated with slipping the date.
-
-## Serviceability
-
-- How will the components that make up this epic be tested?
-
- - Existing BuildMonitor tests will be extended to include the components added by this work
-
-- Identifying secrets (e.g. PATs, certificates, et cetera) that will be used (new ones to be created; existing ones to be used).
-
- No new secrets will be needed by the change.
-
-- Does this change any existing SDL threat or data privacy models? (models can be found in [core-eng/SDL](https://github.com/dotnet/core-eng/SDL) folder)
-- Does this require a new SDL threat or data privacy models?
-
- No; the only PII used will be GitHub aliases / labels and this will be augmenting existing functionality.
-
-### Rollout and Deployment
-
-This section left blank as this will be part of an arcade-services component.
-
-## FR Hand off
-
-- What documentation/information needs to be provided to FR so the team as a whole is successful in maintaining these changes?
-
- As the changes for this should be minimal, no additional documentation should be needed.
-
-## Description of the work
-
-### Components changed
-
-#### Component: BuildMonitor
-
-- Changes to [BuildMonitorOptions](https://github.com/dotnet/arcade-services/blob/main/src/DotNet.Status.Web/Options/BuildMonitorOptions.cs)
- - Restructure BuildMonitorOptions, AzurePipelineOptions, and/or IssuesOptions so that we can open issues in multiple repos, not just core-eng. This means either having multiple monitors/issues or linking monitor definitions to issue definitions.
-- Changes to [AzurePipelineOptions](https://github.com/dotnet/arcade-services/blob/main/src/DotNet.Status.Web/Options/BuildMonitorOptions.cs#L20)
- - Add an "tags" field to BuildDescription. Tags on a pipeline allow you to mark the build with a piece of metadata that you can filter on in the AzDO UI, and is also exposed via the AzDO api for pipelines. We use this data so that product teams can filter the pipeline to only see their builds.
-- [ProcessBuildNotificationsAsync()](https://github.com/dotnet/arcade-services/blob/main/src/DotNet.Status.Web/Controllers/AzurePipelinesController.cs#L143)
- - If Tags are set in BuildDescription, compare to the tags for the given build, to determine if this is a build of interest
- - If custom text available, append it to the body of the issue description
-
-#### Component: Repository access
-
-- Build monitor needs to be given access to create issues in each repository that will require notifications. It is currently only enabled in core-eng
-
-#### Component: Schedule-Validation-Pipeline
-
-- Update tags for builds to tag with the repo, the channel, and repo-channel. Right now, we only tag as repo-channel
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cvalidation-notifications-arcade7299.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cvalidation-notifications-arcade7299.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Cvalidation-notifications-arcade7299.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/zenhub-migration-core-eng-15084.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/zenhub-migration-core-eng-15084.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/One-Pagers/zenhub-migration-core-eng-15084.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/One-Pagers/zenhub-migration-core-eng-15084.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,141 +0,0 @@
-# Migration from ZenHub to GitHub Projects (beta)
-
-Going forward, we need to move all of our issue tracking out of ZenHub. The replacement we have chosen is GitHub Projects (beta). While Projects (beta) is not a one-to-one mapping of ZenHub and its concepts, it will fulfill our needs as an issue tracking management system.
-
-## Stakeholders
-
-* .NET Core Engineering Services team (contact: @dnceng)
-* .NET Partners who use ZenHub
-
-## Risks
-
-* What are the unknowns?
- * The unknowns of this epic center around the Projects (beta) board as a whole: can it support the number of issues that we have open? What sort of compromises will we have to make with the new structure? Not everything is directly mappable, so we need to keep this in mind. The GitHub roadmap is [here](https://github.com/github/roadmap/projects/1).
-* Are there any POCs (proofs of concept) that need to be built for this work?
- * We will want a POC for the migration from the ZenHub board to the Projects (beta) board that takes an issue and adds it to the board, and also that takes an epic issue and converts it into a Projects (beta) field on the board
- * We also will likely want a POC board, though several other teams have their own boards that we can use as examples for how they are being used (see the [.NET Docker board](https://github.com/orgs/dotnet/projects/58/views/1) as an example that uses the board, list, triage and epic views).
-* What dependencies will this work have? Are the dependencies currently in a state that the functionality in the work can consume them now, or will they need to be updated?
- * GitHub REST API
- * GitHub GraphQL API - There is no C# API for the Projects (beta) APIs (and the C# API that there is for GraphQL is also in beta), so we are stuck with HTTP requests until we have a C# api to work off of. This could mean changes that happen underneath us could lead to issues that we don't see until we notice that we are missing issues being added to the board.
- * ZenHub API
-* Will the new implementation of any existing functionality cause breaking changes for existing consumers?
- * It should not, other than the fact that the ZenHub board will no longer be our source of truth.
-* Is there a goal to have this work completed by, and what is the risk of not hitting that date? (e.g. missed OKRs, increased pain-points for consumers, functionality is required for the next product release, et cetera)
- * We want to complete the initial migration by the end of February, when our ZenHub subscription runs out.
- * If at all possible, we should also have the automatic adding of issues/epics to the board done by then as well, so the board doesn't need to be manually managed.
-
-## Open Questions
-
-* Single board vs board-per-epic?
- * Both have pros and cons, but a single board with all of the issues will be required for stand up, and is the most directly comparable to what we have now.
-
-* Where should the board live?
- * Because we may have issues in multiple repositories (as we do now with arcade and core-eng), we need to create the board at the organization level, rather than the repository level. That means to get to the board, we will need to go through https://github.com/orgs/dotnet/projects?type=beta. This will enable folks who are working on issues in non-dnceng repositories to add their issues to the dnceng board (for example, Android/Apple issues in dotnet/runtime).
-
-* What are all the use cases for the ZenHub board, and how will those translate to the new Projects (beta) board?
- * Epic Reviews
- * Reviewing all of the epics to determine business priorities
- * Reviewing a single epic
- * Stand up
- * Reviewing all of the work currently being worked on
- * Either by epic or by status
- * FR stand up
- * Reviewing the FR issues currently being worked on
- * Reviewing the FR backlog
- * Triage
- * Reviewing all issues that have currently not been assigned to an epic
- * Individual user view
- * Reviewing all issues assigned to a particular person
-
- All of these scenarios will be manageable in Projects (beta). The only things that don't seem translatable are:
-
- * Convert this issue into an epic
- * We will need to manually add the epic label to the issue, and then have our own app or webhook that monitors for this label and adds the Epic issue to the board as an epic
- * Additionally, the issues won't have all of the extra epic stuff (table with all of the issues and their statuses) that ZenHub gives us
- * Sort by assignee on board view
- * Currently, you can group by status and sort by assignee on the table view in Projects (beta), but not in the board view
- * This may cause us to rethink how we run stand-up. Do we even need to be going over issues on an individual basis? Should our stand ups be adjusted?
-
-* How will we translate the concept of an "epic" to Projects (beta)?
-
-We have used Epics in ZenHub as a way to track the concept of business priorities. While GitHub does not have the exact concepts of epics, they are currently implementing features in issues to track issues in other issues: essentially, what we do with ZenHub epics now. This works by adding a task list to each tracking issue containing each issue that is tracked by that issue/epic. After discussions with GitHub, this is the recommended path forward. Additionally, there is a coming feature that will allow us to display the "Tracked in" issue on the Projects (beta) board, which is comparable to what we have with the ZenHub board. The major challenge with this approach is adding issues to an epic/tracking issue. Today, the only way to do so is to update the markdown description of the tracking issue with either the link, org/repo#issueNumber, or issue number (if it's in the same repository) of the tracked issue. This cannot be done from the tracked issue. Issues can be tracked in multiple tracking issues, and GitHub, behind the scenes, creates a tree structure that can be queried for async triage (though that is still in beta as well). Whether or not this query capability will be in REST or only graphql is still a question.
-
-This will be a major change to how we work when it comes to epics. We will need to be cognizant of updating the epic issue when we create a new issue (or, you can just add a checkbox to the task list and use the "Create issue from task" button that will appear after saving). Unlike before, where we went from issue to epic, we will need to start thinking in an epic-first way, where we start at the epic and end at the issue.
-
-The nice thing about this is that each issue will have a "tracked by" link under the title, which will allow us to go back to the epic issue easily.
-
-* How will issues be added to the board?
- * Projects (beta) does not have the ability to automatically add issues to the board when they are created
- * Users can manually add issues at creation time
- * We will need to use a webhook that watches for issue creation and adds those issues to the board automatically if the user didn't do so
-
-## Components to change
-
-This work consists of three parts: the new Projects (beta) board, a command line tool to do the initial port of the ZenHub board to the Projects (beta) board, and a service that adds issues and epics to the board as they are created and/or updated.
-
-### The Projects (beta) board
-
-We need to create a new board and add all of the required columns and fields so that it maintains parity with the ZenHub board. As we do this, we may decide to add new columns and/or remove columns that are rarely used or no longer fit our needs.
-
-### Command line tool
-
-This tool will do the following:
-
-* For every issue in arcade, core-eng, and xliff-tasks
- * Add it to the new Projects (beta) board
-* For every epic issue
- * Add a section to the issue description for a task list
- * Use the ZenHub API to get the list of issues in the epic and add it to the task list markdown if it isn't already there
-
-If all goes well, we will only run this command line tool once to get the initial port done, and then we will be able to switch over to the new board and rely on the service and users correctly adding new issues to epics/the project using the new methodology.
-
-### Service
-
-The service will be a webhook that monitors new and updated issues.
-
-* New Issue:
- * If the issue is not already on the board, add it to the board
-
-Note: GitHub has an issue open to automatically add issues in a repository to a project. When that feature comes out, the service can be decommissioned.
-
-### Async Triage tool
-
-As part of this work, we will be removing the ZenHub board, and therefore must update the async triage tool to monitor the new Projects (beta) board rather than using the ZenHub APIs.
-
-The triage tool will need to walk through all the issues that are still open in each of our repositories, and discover the tracked in information. We can do this using graphql, though the api for it is under a feature flag. However, we should be able to easily get the information for every issue and use linq to identify the issues with no tracking information. My understanding is that when this is released, there will also be a REST API for it that we might be able to switch to.
-
-## Serviceability
-
-* How will the components that make up this epic be tested?
- * With unit tests
- * Potentially with the staging environment to update a staging copy of the board
-* How will we have confidence in the deployments/shipping of the components of this work?
 * We will monitor the board as issues are added to arcade to make sure that they are also added to the new board.
-
-## Rollout and Deployment
-
-* How will we roll this out safely into production?
- * We will have unit tests for the service
- * We may have a staging board that uses the service in staging to update it, so that we can catch when changes cause the process to fail
-* How often and with what means will we deploy this?
- * This will be rolled out alongside Helix, and will follow the normal helix rollout process
-
-## FR Handoff
-
-* What documentation/information needs to be provided to FR so the team as a whole is successful in maintaining these changes?
- * We will provide documentation for adding and debugging GraphQL queries
- * We will provide documentation on how to run the migration tool, though it should only need to be run once
- * We will provide documentation on new procedures for creating new epics, and how the github webhook works to update the board so that new issues can be added to epics
- * We will provide documentation on how new issues are automatically added to the board, and how to debug the service if they are not being added
-
-## Monitoring
-
-We will use AppInsights for monitoring of the service and add grafana alerts for when there are errors adding issues to the board.
-
-## Decommissioning the CLI tool
-
-Once the migration is complete, we will no longer need the cli tool. After we have successfully helped the rest of the org migrate off of ZenHub, either using the CLI tool or not, we will remove the CLI tool so as to not clutter up helix-services.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Czenhub-migration-core-eng-15084.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Czenhub-migration-core-eng-15084.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5COne-Pagers%5Czenhub-migration-core-eng-15084.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/README.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/README.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/README.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/README.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,51 +0,0 @@
-# Rollout Score Card
-
-Each rollout will be scored accordingly. The mechanics are similar to golf scoring, in which the higher the number, the worse the score. The goal is a score of 0. Each item has a threshold associated with it and a score associated for each infraction. The "threshold" is the allowable amount for each item, and any amount over the allowable will incur penalties.
-
-The score card allows us to count up infractions against our rollouts. A successful rollout should be completed in the time allowed (i.e. threshold), and should not require any sort of intervention, like a hotfix or rollback. Also, we should not receive any customer complaints based on the rollout. A rollout like that would give us a score of 0, which is our goal. We should be able to drill down into the score to see why the penalties were incurred and improve our rollout process (e.g. adding validation, testing, et cetera).
-
-1. Time taken to rollout (Every 15 minutes over [rounded up] is worth 1 point). This value is calculated based on the total time to build and deploy the service (explicitly: the sum of the build pipeline's elapsed time and the release pipeline's elapsed time). End-to-end tests run after the service has been deployed are not added to this value. (Reason for this is that the faster our builds and deployments are, the faster we can make changes and hotfixes, if necessary).
-
- A. OS Onboarding Threshold: 1 hour
-
- B. Helix Threshold: 30 minutes
-
- C. Arcade-Services Threshold: 1 hour
-
-2. Number of critical/blocking issues as a result of the rollout (1 point per issue). Threshold is 0.
-
-3. Service downtime (availability and reliability) as a result of the rollout (1 point for every minute of downtime). Threshold is 0.
-
-4. Number of hotfixes (5 points per hotfix). Threshold is 0. Also, the time it takes to roll this out is cumulatively added to the initial rollout time (see #1 above).
-
-5. Number of rollbacks (10 points per rollback). Threshold is 0. Also, the time it takes to roll this out is cumulatively added to the initial rollout time (see #1 above).
-
-6. Failure to rollout (50 points). This is a scenario in which we have made a commitment to our customers (e.g. posted it in release notes), and did not meet the window to rollout. This includes partial rollouts (i.e. rollouts in which some parts of the deployment failed and were not remedied).
-
-Example: A rollout of OS Onboarding took 6 hours (20 points), Helix took less than 30 minutes (0 points), and Arcade-Services took 1 hour (0 points). A critical issue occurred in Helix (1 point), which resulted in the need for a hotfix (5 points) and another rollout of Helix, that took less than 30 minutes (0 points). During that time, the component in Helix that was broken caused a service downtime of an hour (60 points). This score of this rollout would have been 86 points.
-
-## Dashboard
-
-A Power BI dashboard of all rollout scorecard metrics so far can be found [here](https://msit.powerbi.com/groups/de8c4cb8-b06d-4af8-8609-3182bb4bdc7c/reports/6d2bd5cd-f96f-40df-af3f-33fd4cf1d82d).
-
-## Pre-Requisites
-
-In order for this to work, we'll need the entire team to be on board with this process:
-
-* Every hotfix needs to be noted as a hotfix for us to measure those metrics. Same with rollbacks.
-* Issues (critical or not) that are a result of a rollout need to be marked as such with labels.
-
-The full guidelines for these are noted in the [Policy Document](/Documentation/Policy/DeploymentPolicy.md).
-
-## Automation
-
-Rollout Scorecards are automatically created, submitted, and logged by the [Rollout Scorer](https://github.com/dotnet/arcade-services/tree/master/src/RolloutScorer/Readme.md).
-
-## July 24, 2019 Rollout (Example)
-
-The July 2019 rollout served as the basis for this scorecard. Its scorecard can be found [here](Scorecard_2019-07-24.md).
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CREADME.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CREADME.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CREADME.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Automation_Proposal.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Automation_Proposal.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Automation_Proposal.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Automation_Proposal.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,67 +0,0 @@
-# Rollout Scorer Automation Proposal
-
-The [Rollout Scorer](/MiscTools/RolloutScorer/Readme.md) as it exists now is very useful for speeding up the process
-of scoring our rollouts. However, it could still be made more valuable by fully automating it as a service that requires
-minimal human intervention.
-
-## Defining and automating the remaining metrics
-Currently, the rollout scorer is capable of automatically calculating the total rollout duration, number of critical issues,
-number of hotfixes, and number of rollbacks from build timelines and labeled core-eng issues. However, it still relies on
-manual input to determine downtime and rollout failure.
-
-### Rollout failure
-Rollout failure is a boolean value, and failed rollouts gain an extra fifty points. In the past, we have defined rollout
-failure as "we sent out release notes for a rollout and then did not perform a rollout at all." Thus, the only rollout
-previously categorized as a failure was the [4 September 2019 OSOB rollout](./Scorecard_2019-09-04.md). Under this definition,
-rollout failure requires manual categorization since there is definitionally no evidence that a rollout was attempted.
-
-However, a more robust definition of rollout failure is "we did not roll out everything we promised in the release notes."
-This still encompasses the previous definition, but additionally encompasses rollouts with partially successful deployments.
-Anecdotally, both of the previous OSOB rollouts at time of writing fall under this definition, while there has only been one
-rollout since we started tracking these that meets the earlier definition.
-
-This definition also has the benefit that it is significantly easier to measure –
-**if the final build of a rollout isnot green, the rollout is categorized as a failed deployment.**
-
-### Downtime
-Part of the reason downtime relied on manual input was that the definition itself is incredibly nebulous.
-The [Rollout Scorecard's readme](./README.md) notes that downtime includes both "availability and reliability."
-
-The telemetry epic folks are expecting roll out alerting for our availability metrics for Helix and Maestro
-(which should cover arcade-services measurements); these metrics are already measured [here](https://dotnet-eng-grafana.westus2.cloudapp.azure.com/d/quNLOchZz/service-availability?orgId=2&refresh=30s).
-The next step should be to add alerting on metrics for reliability. Adding this alerting is outside the scope of this project, but some proposals for how it could be done are included below.
-
-
-Alerting suggestions
-
-To begin with, we should start with an extremely broad definition of reliability downtime to capture the most catastrophic failures.
-To this end, I propose the following alerting queries:
-
-* To measure OSOB downtime, look back over the past thirty five minutes and compare the queue depth to started jobs across all queues.
-* **If there are more than an arbitrary number of queued jobs (e.g. 100) and zero started jobs, OSOB should be considered "down" and an alert will be triggered.**
-* To measure Helix downtime, we will measure the status of the controller with the same query structure but compare service bus queue depth
-* to queued jobs across all queues.
-* **If service bus queue depth is greater than zero and there are zero queued jobs, Helix should be considered "down" and an alert will be triggered.**
-
-
-Once alerting is in place, alerts will be filed as GitHub issues in core-eng. The Rollout Scorer is already pointed at core-eng for issues,
-hotfixes, and rollbacks, and thus we can use these issues to determine downtime as well. Issues from these alerts can be automatically
-tagged with a *Rollout Downtime* label and the appropriate repo rollout label. The Rollout Scorer will then measure downtime as
-**the finish time of the build that ran prior to the opening of the alert to the time the issue is closed.** We could also use a different
-event to indicate the end of a downtime period, such as leaving a comment on the issue saying "Downtime resolved at [time]" or something similar.
-This has the additional benefit of allowing downtime issues to be manually filed if necessary, similar to the manual hotfix/rollout issues we already have.
-
-## Azure Function and Build Triggers
-
-The existing Rollout Scorer will be moved to arcade-services and deployed as an Azure Function.
-
-1. The function will poll the deployment table (see [dotnet/arcade-services#814](https://github.com/dotnet/arcade-services/pull/814)) for new entries
-2. Upon finding a new deployment, it will set a timer for two days to allow a buffer for the rollout to complete fully. (This timer could be revised to a day if our rollout periods shorten.
-3. As further entries come in (from other rollouts in the same period of time including both hotfixes and other repos), the timer will be reset and collect a list of repos to score.
-4. Once the timer hits zero, the rollout scorer will score all the repos it received requests from with the start date set to the first day it received a request. As at present, it will create a PR to core-eng and add the scores to the database.
-
-This will effectively fully automate the rollout scorecard creation process.
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CRollout_Scorer_Automation_Proposal.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CRollout_Scorer_Automation_Proposal.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CRollout_Scorer_Automation_Proposal.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Proposal.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Proposal.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Proposal.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Rollout_Scorer_Proposal.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,42 +0,0 @@
-# Rollout Scorer Proposal
-
-This is a proposal for the Rollout Scorer which will assist in generating rollout scorecards. Given a few inputs, it will scrape AzDO (and later, telemetry sources) to calculate a score and then generate a markdown file in a PR to core-eng and upload the data to Kusto.
-
-## Tool Description
-The Rollout Scorer will be a command-line tool. The arguments it will accept are as follows:
-
-| Argument | Required? | Description |
-|:------------------------------:|:------------:|:--------------------------------------|
-| `--repo` or `-r` | **Required** | The repository to score |
-| `--branch` or `-b` | *Optional* | The branch of the repo to score(e.g. servicing or prod); defaults to production |
-| `--rollout-start-date` or `-s` | **Required** | The date on which the rollout started |
-| `--rollout-end-date` or `-e` | *Optional* | The date on which the rollout ended; defaults to current date |
-| `--number-of-rollbacks` | *Optional* | The number of rollbacks which occurred as part of the rollout; defaults to 0 |
-| `--downtime` or `-d` | *Optional* | Specifies an amount of downtime which occurred |
-| `--failed` or `-f` | *Optional* | Indicates a failed rollout (50 points) |
-| `--output` or `-o` | *Optional* | File which the generated csv will be outputted to; defaults to `./scorecard.csv` |
-| `--skip-output` | *Optional* | Skips the output step and directly uploads results |
-| `--upload` or `-u` | *Optional* | Replaces all other parameters; uploads csv file to Kusto and makes PR in core-eng |
-
-The flow for using the Rollout Scorer is as follows:
-* Run `RolloutScorer.exe` and specify the repo, rollout start date, and any optional parameters
-* The Rollout Scorer will scrape AzDO for the appropriate data and create a CSV file containing the scorecard data
-* User can make manual corrections to the CSV file as necessary
-* Run `RolloutScorer.exe --upload {csv}` and the Rollout Scorer will upload the CSV file to Kusto and AzDO
-
-As shown in the parameters table, the user can optionally choose to skip the manual CSV adjustment stage.
-
-## Score Calculation
-The Rollout Scorer will reference an INI file which will contain a map from repository name to the URI of the AzDO release or build definition. It will scrape this definition for all of the builds (targeting the production branch) or releases that occurred within the specified timeframe. From this data it will calculate:
-
-* **Total rollout time** — The sum of all build/release times
-* **Number of critical issues** — Calculated from the number of commits in each hotfix release/build
-* **Number of hotfixes** — Calculated from number of release/builds after the first one
-* **Number of rollbacks** — Manually specified by the user
-* **Downtime** — Manually specified by the user, but eventually will be calculated from telemetry
-* **Failure to rollout** — Manually specified by the user
-
-
-
-Was this helpful? [![Yes](https://helix.dot.net/f/ip/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CRollout_Scorer_Proposal.md)](https://helix.dot.net/f/p/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CRollout_Scorer_Proposal.md) [![No](https://helix.dot.net/f/in)](https://helix.dot.net/f/n/5?p=Documentation%5CTeamProcess%5CRollout-Scorecards%5CRollout_Scorer_Proposal.md)
-
diff -Nru dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Scorecard_2019-07-24.md dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Scorecard_2019-07-24.md
--- dotnet8-8.0.100-8.0.0~rc2/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Scorecard_2019-07-24.md 2023-10-18 18:08:29.000000000 +0000
+++ dotnet8-8.0.100-8.0.0/src/arcade/Documentation/TeamProcess/Rollout-Scorecards/Scorecard_2019-07-24.md 1970-01-01 00:00:00.000000000 +0000
@@ -1,74 +0,0 @@
-# 24 July 2019 Rollout Summaries
-
-## Helix
-
-| Metric | Value | Target | Score |
-|:--------------------------------:|:--------:|:-------:|:------:|
-| Time to Rollout | 05:14:06 | 0:30:00 | 19 |
-| Critical/blocking issues created | 9 | 0 | 9 |
-| Hotfixes | 5 | 0 | 25 |
-| Rollbacks | 1 | 0 | 10 |
-| Service downtime | 0:00:00 | 0:00:00 | 0 |
-| Failed to rollout | FALSE | FALSE | 0 |
-| **Total** | | | **63** |
-
-## OS Onboarding
-
-| Metric | Value | Target | Score |
-|:--------------------------------:|:--------:|:-------:|:-------:|
-| Time to Rollout | 22:47:26 | 1:00:00 | 88 |
-| Critical/blocking issues created | 4 | 0 | 4 |
-| Hotfixes | 4 | 0 | 20 |
-| Rollbacks | 0 | 0 | 0 |
-| Service downtime | 0:00:00 | 0:00:00 | 0 |
-| Failed to rollout | FALSE | FALSE | 0 |
-| **Total** | | | **112** |
-
-## Arcade Services
-
-| Metric | Value | Target | Score |
-|:--------------------------------:|:--------:|:-------:|:------:|
-| Time to Rollout | 03:11:00 | 1:00:00 | 9 |
-| Critical/blocking issues created | 2 | 0 | 2 |
-| Hotfixes | 2 | 0 | 10 |
-| Rollbacks | 0 | 0 | 0 |
-| Service downtime | 0 | 0:00:00 | 0 |
-| Failed to rollout | FALSE | FALSE | 0 |
-| **Total** | | | **21** |
-
-# Breakdowns
-
-## Helix
-
-| Metric | [2019072401.01](https://dev.azure.com/mseng/Tools/_releaseProgress?_a=release-pipeline-progress&releaseId=2672) | [2019072403.01](https://dev.azure.com/mseng/Tools/_releaseProgress?_a=release-pipeline-progress&releaseId=2673) | [2019072404.01](https://dev.azure.com/mseng/Tools/_releaseProgress?_a=release-pipeline-progress&releaseId=2675) | [2019072501.01](https://dev.azure.com/mseng/Tools/_releaseProgress?_a=release-pipeline-progress&releaseId=2676) | [2019072505.01](https://dev.azure.com/mseng/Tools/_releaseProgress?_a=release-pipeline-progress&releaseId=2678) | [2019072506.01](https://dev.azure.com/mseng/Tools/_releaseProgress?_a=release-pipeline-progress&releaseId=2679) | Total |
-|:--------------------------------:|:-------------:|:-------------:|:-------------:|:-------------:|:-------------:|:-------------:|:--------:|
-| Time to Rollout | 2:07:08 | 0:28:10 | 0:28:16 | 0:32:02 | 0:37:00 | 1:01:30 | 05:14:06 |
-| Critical/blocking issues resolved | 1 SQL column | 1 [c20e1561](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/c20e156129da8169c7d255bffd21ef142e254dae?refName=refs%2Fheads%2Fmaster) | 1 [d0726c2f](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/d0726c2ff82299da276d0d874c5de9afde909eaf?refName=refs%2Fheads%2Fmaster) | 1 [#7124](https://github.com/dotnet/core-eng/issues/7124)/[54f92c93](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/54f92c9389ee7bd0d31989f4594648e61b496fa9?refName=refs%2Fheads%2Fmaster) | 4 [072d7af1](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/072d7af1be25f2a14c6f87b96e55b60df20d3134?refName=refs%2Fheads%2Fmaster)/[e5a768b8](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/e5a768b85ee03a0876ff54461373f4d0b9f72ca3?refName=refs%2Fheads%2Fmaster) [82a3f7d2](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/82a3f7d2f8b478c18fe0b9d14967fce35ce5baa5?refName=refs%2Fheads%2Fmaster) [c1fece1b](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/c1fece1b3af0aedc7772cbf78631efdebcbd9d39?refName=refs%2Fheads%2Fmaster) [dddb7d19](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/dddb7d19cddc0398e2d1a228b2a6a598a12a525a?refName=refs%2Fheads%2Fmaster) | 1 [82b0cfba](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/82b0cfba1a618fc70a3f8f2c35604c6efa972b53?refName=refs%2Fheads%2Fmaster) | 9 |
-| Hotfixes | 0 | 1 | 1 | 1 | 1 | 1 | 5 |
-| Rollbacks | 0 | 0 | 0 | 0 | 1 [072d7af1](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/072d7af1be25f2a14c6f87b96e55b60df20d3134?refName=refs%2Fheads%2Fmaster)/[e5a768b8](https://dev.azure.com/mseng/Tools/_git/CoreFX%20Engineering%20Infrastructure/commit/e5a768b85ee03a0876ff54461373f4d0b9f72ca3?refName=refs%2Fheads%2Fmaster) | 0 | 1 |
-| Service downtime | 0:00:00 | 0:00:00 | 0:00:00 | 0:00:00 | 0:00:00 | 0:00:00 | 0:00:00 |
-
-## OS Onboarding
-
-| Metric | [2019072401](https://dev.azure.com/dnceng/internal/_build/results?buildId=277460) | [2019072402](https://dev.azure.com/dnceng/internal/_build/results?buildId=277925) | [2019072403](https://dev.azure.com/dnceng/internal/_build/results?buildId=278090) | [2019072404](https://dev.azure.com/dnceng/internal/_build/results?buildId=278216) | [2019072501](https://dev.azure.com/dnceng/internal/_build/results?buildId=279688) | [2019072601](https://dev.azure.com/dnceng/internal/_build/results?buildId=281352) | [2019072602](https://dev.azure.com/dnceng/internal/_build/results?buildId=281850) | Total |
-|:--------------------------------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|----------|
-| Time to Rollout | 06:00:17 | 00:43:44 | 01:23:51 | 03:40:23 | 03:03:58 | 04:56:34 | 02:58:39 | 22:47:26 |
-| Critical/blocking issues resolved | 0