Add Jira maintenance & housekeeping scripts

This commit is contained in:
Martin F. 2026-02-19 16:27:14 +01:00
parent a08ff5b88f
commit ff7b92c058
23 changed files with 2409 additions and 914 deletions

View File

@ -0,0 +1,80 @@
/**
 * Notification Scheme Housekeeping (Cloud) — report + optional delete.
 * --------------------------------------------------------------------
 * Strategy:
 *  - PROTECTED_PROJECT_KEYS are the only projects considered "live".
 *  - A scheme may only be deleted if it is used by NONE of those projects.
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_PROJECT_KEYS = ["NIN","NICS","NINPDS","NINPDSARC","CS","CRON"]
def DRY_RUN = true // switch to false only once the report looks correct
def SCHEME_PAGE_SIZE = 50 // page size for the scheme listing
logger.info("=== Notification Scheme Housekeeping ===")
logger.info("Protected projects: ${PROTECTED_PROJECT_KEYS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// Helper: project key -> notification scheme id (only 6 calls, ok)
def projectToSchemeId = [:]
PROTECTED_PROJECT_KEYS.each { key ->
    def resp = get("/rest/api/3/project/${key}/notificationscheme")
        .asObject(Map)
    if (resp.status == 200) {
        projectToSchemeId[key] = resp.body?.id?.toString()
    } else {
        // Map failures to null so they can never match a real scheme id.
        projectToSchemeId[key] = null
        logger.warn("WARN|PROJECT_LOOKUP_FAILED|${key}|status=${resp.status}")
    }
}
logger.info("INFO|PROJECT_SCHEME_MAP|${projectToSchemeId}")
// Fetch ALL notification schemes.
// FIX: GET /rest/api/3/notificationscheme is a paginated endpoint; the previous
// version read only the first page of body.values, so schemes beyond it never
// appeared in the report at all. Page through until isLast, like the sibling
// housekeeping scripts do.
def schemes = []
def startAt = 0
while (true) {
    def schemesResp = get("/rest/api/3/notificationscheme?startAt=${startAt}&maxResults=${SCHEME_PAGE_SIZE}")
        .asObject(Map)
    assert schemesResp.status == 200
    def values = schemesResp.body?.values ?: []
    schemes.addAll(values)
    def isLast = schemesResp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += SCHEME_PAGE_SIZE
}
def candidates = [] // [id, name]
def kept = 0
schemes.each { scheme ->
    def schemeId = scheme.id?.toString()
    def schemeName = scheme.name?.toString()
    // Protected project keys whose assigned scheme id matches this scheme.
    def usedBy = projectToSchemeId.findAll { k, v -> v == schemeId }*.key
    if (usedBy && !usedBy.isEmpty()) {
        kept++
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|usedBy=${usedBy}")
    } else {
        candidates << [schemeId, schemeName]
        logger.info("DEL?|schemeId=${schemeId}|name=${schemeName}|usedBy=[]")
        if (!DRY_RUN) {
            def delResp = delete("/rest/api/3/notificationscheme/${schemeId}")
                .asString()
            if (delResp.status == 204) {
                logger.info("DEL|OK|schemeId=${schemeId}|name=${schemeName}")
            } else {
                logger.error("DEL|FAIL|schemeId=${schemeId}|name=${schemeName}|status=${delResp.status}|body=${delResp.body}")
            }
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total schemes: ${schemes.size()}")
logger.info("Kept (used by protected projects): ${kept}")
logger.info("Delete candidates: ${candidates.size()}")
// Candidates repeated at the end so they are easy to copy out.
candidates.each { c ->
    logger.info("CANDIDATE|schemeId=${c[0]}|name=${c[1]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,124 @@
/**
 * Permission Scheme Housekeeping (Cloud) — report + optional delete.
 * ------------------------------------------------------------------
 * A scheme may only be deleted when all of the following hold:
 *   1) it is not used by any project in PROTECTED_PROJECT_KEYS
 *   2) its id is not listed in PROTECTED_SCHEME_IDS
 *   3) its name matches none of PROTECTED_NAME_PATTERNS
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_PROJECT_KEYS = ["NIN","NICS","NINPDS","NINPDSARC","CS","CRON"]
// Extra safeguard: ids listed here are never deleted.
def PROTECTED_SCHEME_IDS = [
    // "10000", // <- optional: ids that must NEVER be deleted
]
// Case-insensitive name fragments; any match protects the scheme.
def PROTECTED_NAME_PATTERNS = [
    "default",
    "system"
]
def DRY_RUN = true // switch to false only once the report looks correct
logger.info("=== Permission Scheme Housekeeping ===")
logger.info("Protected projects: ${PROTECTED_PROJECT_KEYS}")
logger.info("Protected scheme IDs: ${PROTECTED_SCHEME_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// Name-based protection: case-insensitive substring match.
def isNameProtected = { String candidateName ->
    def lowered = (candidateName ?: "").toLowerCase()
    PROTECTED_NAME_PATTERNS.any { pattern -> lowered.contains((pattern ?: "").toLowerCase()) }
}
// Id-based protection: exact string match against the protected list.
def isIdProtected = { String candidateId ->
    PROTECTED_SCHEME_IDS.any { protectedId -> protectedId?.toString() == candidateId?.toString() }
}
// 1) Resolve each protected project to its permission scheme id (6 calls).
def projectToSchemeId = [:]
for (String projectKey : PROTECTED_PROJECT_KEYS) {
    def lookup = get("/rest/api/3/project/${projectKey}/permissionscheme").asObject(Map)
    if (lookup.status == 200) {
        // Jira typically returns {id, name, ...} here.
        projectToSchemeId[projectKey] = lookup.body?.id?.toString()
    } else {
        // null can never match a real scheme id.
        projectToSchemeId[projectKey] = null
        logger.warn("WARN|PROJECT_LOOKUP_FAILED|${projectKey}|status=${lookup.status}")
    }
}
logger.info("INFO|PROJECT_SCHEME_MAP|${projectToSchemeId}")
// 2) Fetch all permission schemes.
def listResp = get("/rest/api/3/permissionscheme").asObject(Map)
assert listResp.status == 200
// The payload typically carries the list under body.permissionSchemes.
def allSchemes = listResp.body?.permissionSchemes ?: []
logger.info("INFO|TOTAL_SCHEMES|${allSchemes.size()}")
def candidates = [] // [id, name, reason]
def keptByUsage = 0
def keptByRule = 0
for (scheme in allSchemes) {
    def schemeId = scheme.id?.toString()
    def schemeName = scheme.name?.toString()
    def usedBy = projectToSchemeId.findAll { entry -> entry.value == schemeId }.collect { it.key }
    def nameProtected = isNameProtected(schemeName)
    def idProtected = isIdProtected(schemeId)
    // Rule-based protection always wins, whether the scheme is in use or not.
    if (idProtected || nameProtected) {
        keptByRule++
        def why = []
        if (idProtected) { why << "ID_PROTECTED" }
        if (nameProtected) { why << "NAME_PROTECTED" }
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|usedBy=${usedBy}|reason=${why}")
        continue
    }
    // Usage by a protected project decides next.
    if (usedBy && !usedBy.isEmpty()) {
        keptByUsage++
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|usedBy=${usedBy}|reason=USED_BY_PROTECTED_PROJECT")
        continue
    }
    // Nothing protects it: delete candidate.
    logger.info("DEL?|schemeId=${schemeId}|name=${schemeName}|usedBy=[]|reason=UNUSED")
    candidates << [schemeId, schemeName, "UNUSED"]
    if (!DRY_RUN) {
        def delResp = delete("/rest/api/3/permissionscheme/${schemeId}").asString()
        if (delResp.status == 204) {
            logger.info("DEL|OK|schemeId=${schemeId}|name=${schemeName}")
        } else {
            logger.error("DEL|FAIL|schemeId=${schemeId}|name=${schemeName}|status=${delResp.status}|body=${delResp.body}")
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total schemes: ${allSchemes.size()}")
logger.info("Kept (used by protected projects): ${keptByUsage}")
logger.info("Kept (protected by rules): ${keptByRule}")
logger.info("Delete candidates: ${candidates.size()}")
candidates.each { entry ->
    logger.info("CANDIDATE|schemeId=${entry[0]}|name=${entry[1]}|reason=${entry[2]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,193 @@
/**
 * Issue Type Scheme Housekeeping (Cloud) — report + optional delete.
 * ------------------------------------------------------------------
 * Strategy:
 *  - Fetch all projects (id + key).
 *  - For each project: fetch its issue type scheme association.
 *  - Delete only if a scheme is attached to NO project at all.
 *  - Extra protection via explicit scheme IDs + name patterns.
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_PROJECT_KEYS = ["NIN","NICS","NINPDS","NINPDSARC","CS","CRON"]
// Additional safeguard: ids listed here are never deleted.
def PROTECTED_SCHEME_IDS = [
    // "10000"
]
// Case-insensitive name fragments; any match protects the scheme.
def PROTECTED_NAME_PATTERNS = [
    "default",
    "system"
]
def DRY_RUN = true // switch to false only once the report looks correct
logger.info("=== Issue Type Scheme Housekeeping ===")
logger.info("Protected projects (keys): ${PROTECTED_PROJECT_KEYS}")
logger.info("Protected scheme IDs: ${PROTECTED_SCHEME_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// Name-based protection (case-insensitive substring match).
def isNameProtected = { String name ->
    def n = (name ?: "").toLowerCase()
    return PROTECTED_NAME_PATTERNS.any { p -> n.contains((p ?: "").toLowerCase()) }
}
// Id-based protection (exact string match).
def isIdProtected = { String id ->
    return PROTECTED_SCHEME_IDS.any { it?.toString() == id?.toString() }
}
/**
 * 1) Fetch all projects (key + id), paging through project search.
 */
def projects = []
def startAt = 0
def maxResults = 50
while (true) {
    def resp = get("/rest/api/3/project/search?startAt=${startAt}&maxResults=${maxResults}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|PROJECT_SEARCH_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    projects.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += maxResults
}
logger.info("INFO|TOTAL_PROJECTS|${projects.size()}")
def projectIdToKey = [:]
projects.each { p ->
    def pid = p?.id?.toString()
    def pkey = p?.key?.toString()
    if (pid && pkey) projectIdToKey[pid] = pkey
}
/**
 * 2) Mapping: schemeId -> [projectIds]
 *    (the endpoint requires a projectId, hence one call per project)
 */
def schemeToProjectIds = [:].withDefault { [] }
projectIdToKey.keySet().each { pid ->
    def resp = get("/rest/api/3/issuetypescheme/project?projectId=${pid}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.warn("WARN|ISSUETYPE_SCHEME_LOOKUP_FAILED|projectId=${pid}|status=${resp.status}")
        return
    }
    def values = resp.body?.values ?: []
    values.each { row ->
        def schemeId = row?.issueTypeScheme?.id?.toString()
        def pids = (row?.projectIds ?: []).collect { it?.toString() }.findAll { it != null }
        if (schemeId) {
            // Note: pids normally contains exactly this pid; merge defensively anyway.
            schemeToProjectIds[schemeId] = (schemeToProjectIds[schemeId] + pids).unique()
        }
    }
}
/**
 * 3) Compute the protected project IDs (for reporting only).
 */
def protectedProjectIds = projectIdToKey.findAll { id, key -> PROTECTED_PROJECT_KEYS.contains(key) }*.key
logger.info("INFO|PROTECTED_PROJECT_IDS|${protectedProjectIds}")
/**
 * 4) Fetch all issue type schemes (paged).
 */
def schemes = []
startAt = 0
maxResults = 100
while (true) {
    def resp = get("/rest/api/3/issuetypescheme?startAt=${startAt}&maxResults=${maxResults}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|SCHEME_LIST_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    schemes.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += maxResults
}
logger.info("INFO|TOTAL_SCHEMES|${schemes.size()}")
/**
 * 5) Evaluation (+ optional delete when DRY_RUN is false).
 */
def keptByUsage = 0
def keptByRule = 0
def candidates = [] // [id,name,reason]
schemes.each { s ->
    def schemeId = s?.id?.toString()
    def schemeName = s?.name?.toString()
    def assocProjectIds = schemeToProjectIds[schemeId] ?: []
    def assocProjectKeys = assocProjectIds.collect { projectIdToKey[it] }.findAll { it != null }.unique()
    def usedByProtectedKeys = assocProjectKeys.intersect(PROTECTED_PROJECT_KEYS)
    def nameProtected = isNameProtected(schemeName)
    def idProtected = isIdProtected(schemeId)
    // Rule-based protection always wins, whether the scheme is in use or not.
    if (idProtected || nameProtected) {
        keptByRule++
        def why = []
        if (idProtected) why << "ID_PROTECTED"
        if (nameProtected) why << "NAME_PROTECTED"
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|usedByProtected=${usedByProtectedKeys}|reason=${why}")
        return
    }
    // Attached to any project at all: not deletable.
    if (!assocProjectIds.isEmpty()) {
        keptByUsage++
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|usedByProtected=${usedByProtectedKeys}|reason=ASSOCIATED_TO_PROJECTS")
        return
    }
    // Truly unassociated: delete candidate.
    logger.info("DEL?|schemeId=${schemeId}|name=${schemeName}|assocProjects=[]|reason=UNASSOCIATED")
    candidates << [schemeId, schemeName, "UNASSOCIATED"]
    if (!DRY_RUN) {
        def delResp = delete("/rest/api/3/issuetypescheme/${schemeId}")
            .asString()
        if (delResp.status == 204) {
            logger.info("DEL|OK|schemeId=${schemeId}|name=${schemeName}")
        } else {
            logger.error("DEL|FAIL|schemeId=${schemeId}|name=${schemeName}|status=${delResp.status}|body=${delResp.body}")
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total schemes: ${schemes.size()}")
logger.info("Kept (associated to any projects): ${keptByUsage}")
logger.info("Kept (protected by rules): ${keptByRule}")
logger.info("Delete candidates: ${candidates.size()}")
candidates.each { c ->
    logger.info("CANDIDATE|schemeId=${c[0]}|name=${c[1]}|reason=${c[2]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,177 @@
/**
 * Issue Type Screen Scheme Housekeeping (Cloud) — report + optional delete.
 * -------------------------------------------------------------------------
 * Delete rule:
 *  - A scheme may only be deleted if it is associated with NO project.
 *  - Extra protection via PROTECTED_SCHEME_IDS and PROTECTED_NAME_PATTERNS.
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_SCHEME_IDS = [
    // "10000"
]
// Case-insensitive name fragments; any match protects the scheme.
def PROTECTED_NAME_PATTERNS = [
    "default",
    "system"
]
def DRY_RUN = true // switch to false only once the report looks correct
def PROJECT_PAGE_SIZE = 50 // paging for project/search
def SCHEME_PAGE_SIZE = 100 // paging for issuetypescreenscheme
logger.info("=== Issue Type Screen Scheme Housekeeping ===")
logger.info("Protected scheme IDs: ${PROTECTED_SCHEME_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// Name-based protection (case-insensitive substring match).
def isNameProtected = { String name ->
    def n = (name ?: "").toLowerCase()
    return PROTECTED_NAME_PATTERNS.any { p -> n.contains((p ?: "").toLowerCase()) }
}
// Id-based protection (exact string match).
def isIdProtected = { String id ->
    return PROTECTED_SCHEME_IDS.any { it?.toString() == id?.toString() }
}
/**
 * 1) Fetch all projects (id + key), paging through project search.
 */
def projects = []
def startAt = 0
while (true) {
    def resp = get("/rest/api/3/project/search?startAt=${startAt}&maxResults=${PROJECT_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|PROJECT_SEARCH_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    projects.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += PROJECT_PAGE_SIZE
}
logger.info("INFO|TOTAL_PROJECTS|${projects.size()}")
def projectIdToKey = [:]
projects.each { p ->
    def pid = p?.id?.toString()
    def pkey = p?.key?.toString()
    if (pid && pkey) projectIdToKey[pid] = pkey
}
/**
 * 2) Mapping: issueTypeScreenSchemeId -> [projectIds]
 *    (the endpoint requires a projectId, hence one call per project)
 */
def schemeToProjectIds = [:].withDefault { [] }
projectIdToKey.keySet().each { pid ->
    def resp = get("/rest/api/3/issuetypescreenscheme/project?projectId=${pid}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.warn("WARN|SCHEME_LOOKUP_FAILED|projectId=${pid}|status=${resp.status}")
        return
    }
    def values = resp.body?.values ?: []
    values.each { row ->
        def schemeId = row?.issueTypeScreenScheme?.id?.toString()
        def pids = (row?.projectIds ?: []).collect { it?.toString() }.findAll { it != null }
        if (schemeId) {
            // Merge defensively even though pids normally holds just this pid.
            schemeToProjectIds[schemeId] = (schemeToProjectIds[schemeId] + pids).unique()
        }
    }
}
/**
 * 3) Fetch all issue type screen schemes (paged).
 */
def schemes = []
startAt = 0
while (true) {
    def resp = get("/rest/api/3/issuetypescreenscheme?startAt=${startAt}&maxResults=${SCHEME_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|SCHEME_LIST_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    schemes.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += SCHEME_PAGE_SIZE
}
logger.info("INFO|TOTAL_SCHEMES|${schemes.size()}")
/**
 * 4) Evaluation + optional delete (when DRY_RUN is false).
 */
def keptAssociated = 0
def keptProtected = 0
def candidates = [] // [id,name,reason]
schemes.each { s ->
    def schemeId = s?.id?.toString()
    def schemeName = s?.name?.toString()
    def assocProjectIds = schemeToProjectIds[schemeId] ?: []
    def assocProjectKeys = assocProjectIds.collect { projectIdToKey[it] }.findAll { it != null }.unique()
    def nameProtected = isNameProtected(schemeName)
    def idProtected = isIdProtected(schemeId)
    // Rule-based protection always wins.
    if (idProtected || nameProtected) {
        keptProtected++
        def why = []
        if (idProtected) why << "ID_PROTECTED"
        if (nameProtected) why << "NAME_PROTECTED"
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|reason=${why}")
        return
    }
    // Associated anywhere: do not delete.
    if (!assocProjectIds.isEmpty()) {
        keptAssociated++
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|reason=ASSOCIATED_TO_PROJECTS")
        return
    }
    // Truly unassociated: delete candidate.
    logger.info("DEL?|schemeId=${schemeId}|name=${schemeName}|assocProjects=[]|reason=UNASSOCIATED")
    candidates << [schemeId, schemeName, "UNASSOCIATED"]
    if (!DRY_RUN) {
        def delResp = delete("/rest/api/3/issuetypescreenscheme/${schemeId}")
            .asString()
        if (delResp.status == 204) {
            logger.info("DEL|OK|schemeId=${schemeId}|name=${schemeName}")
        } else {
            logger.error("DEL|FAIL|schemeId=${schemeId}|name=${schemeName}|status=${delResp.status}|body=${delResp.body}")
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total schemes: ${schemes.size()}")
logger.info("Kept (associated to any projects): ${keptAssociated}")
logger.info("Kept (protected by rules): ${keptProtected}")
logger.info("Delete candidates: ${candidates.size()}")
candidates.each { c ->
    logger.info("CANDIDATE|schemeId=${c[0]}|name=${c[1]}|reason=${c[2]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,179 @@
/**
 * Field Configuration Scheme Housekeeping (Cloud) — report + optional delete.
 * ---------------------------------------------------------------------------
 * Delete rule:
 *  - A scheme may only be deleted if it is associated with NO project.
 *  - Extra protection via PROTECTED_SCHEME_IDS and PROTECTED_NAME_PATTERNS.
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_SCHEME_IDS = [
    // "10000"
]
// Case-insensitive name fragments; any match protects the scheme.
def PROTECTED_NAME_PATTERNS = [
    "default",
    "system"
]
def DRY_RUN = true // switch to false only once the report looks correct
def PROJECT_PAGE_SIZE = 50 // paging for project/search
def SCHEME_PAGE_SIZE = 100 // paging for fieldconfigurationscheme
logger.info("=== Field Configuration Scheme Housekeeping ===")
logger.info("Protected scheme IDs: ${PROTECTED_SCHEME_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// Name-based protection (case-insensitive substring match).
def isNameProtected = { String name ->
    def n = (name ?: "").toLowerCase()
    return PROTECTED_NAME_PATTERNS.any { p -> n.contains((p ?: "").toLowerCase()) }
}
// Id-based protection (exact string match).
def isIdProtected = { String id ->
    return PROTECTED_SCHEME_IDS.any { it?.toString() == id?.toString() }
}
/**
 * 1) Fetch all projects (id + key), paging through project search.
 */
def projects = []
def startAt = 0
while (true) {
    def resp = get("/rest/api/3/project/search?startAt=${startAt}&maxResults=${PROJECT_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|PROJECT_SEARCH_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    projects.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += PROJECT_PAGE_SIZE
}
logger.info("INFO|TOTAL_PROJECTS|${projects.size()}")
def projectIdToKey = [:]
projects.each { p ->
    def pid = p?.id?.toString()
    def pkey = p?.key?.toString()
    if (pid && pkey) projectIdToKey[pid] = pkey
}
/**
 * 2) Mapping: fieldConfigurationSchemeId -> [projectIds]
 *    (the endpoint requires a projectId, hence one call per project)
 *
 * REST: GET /rest/api/3/fieldconfigurationscheme/project?projectId={projectId}
 */
def schemeToProjectIds = [:].withDefault { [] }
projectIdToKey.keySet().each { pid ->
    def resp = get("/rest/api/3/fieldconfigurationscheme/project?projectId=${pid}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.warn("WARN|SCHEME_LOOKUP_FAILED|projectId=${pid}|status=${resp.status}")
        return
    }
    def values = resp.body?.values ?: []
    values.each { row ->
        def schemeId = row?.fieldConfigurationScheme?.id?.toString()
        def pids = (row?.projectIds ?: []).collect { it?.toString() }.findAll { it != null }
        if (schemeId) {
            // Merge defensively even though pids normally holds just this pid.
            schemeToProjectIds[schemeId] = (schemeToProjectIds[schemeId] + pids).unique()
        }
    }
}
/**
 * 3) Fetch all field configuration schemes (paged).
 *
 * REST: GET /rest/api/3/fieldconfigurationscheme
 */
def schemes = []
startAt = 0
while (true) {
    def resp = get("/rest/api/3/fieldconfigurationscheme?startAt=${startAt}&maxResults=${SCHEME_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|SCHEME_LIST_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    schemes.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += SCHEME_PAGE_SIZE
}
logger.info("INFO|TOTAL_SCHEMES|${schemes.size()}")
/**
 * 4) Evaluation + optional delete (when DRY_RUN is false).
 */
def keptAssociated = 0
def keptProtected = 0
def candidates = [] // [id,name,reason]
schemes.each { s ->
    def schemeId = s?.id?.toString()
    def schemeName = s?.name?.toString()
    def assocProjectIds = schemeToProjectIds[schemeId] ?: []
    def assocProjectKeys = assocProjectIds.collect { projectIdToKey[it] }.findAll { it != null }.unique()
    def nameProtected = isNameProtected(schemeName)
    def idProtected = isIdProtected(schemeId)
    // Rule-based protection always wins.
    if (idProtected || nameProtected) {
        keptProtected++
        def why = []
        if (idProtected) why << "ID_PROTECTED"
        if (nameProtected) why << "NAME_PROTECTED"
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|reason=${why}")
        return
    }
    // Associated anywhere: do not delete.
    if (!assocProjectIds.isEmpty()) {
        keptAssociated++
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|reason=ASSOCIATED_TO_PROJECTS")
        return
    }
    // Truly unassociated: delete candidate.
    logger.info("DEL?|schemeId=${schemeId}|name=${schemeName}|assocProjects=[]|reason=UNASSOCIATED")
    candidates << [schemeId, schemeName, "UNASSOCIATED"]
    if (!DRY_RUN) {
        def delResp = delete("/rest/api/3/fieldconfigurationscheme/${schemeId}")
            .asString()
        if (delResp.status == 204) {
            logger.info("DEL|OK|schemeId=${schemeId}|name=${schemeName}")
        } else {
            logger.error("DEL|FAIL|schemeId=${schemeId}|name=${schemeName}|status=${delResp.status}|body=${delResp.body}")
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total schemes: ${schemes.size()}")
logger.info("Kept (associated to any projects): ${keptAssociated}")
logger.info("Kept (protected by rules): ${keptProtected}")
logger.info("Delete candidates: ${candidates.size()}")
candidates.each { c ->
    logger.info("CANDIDATE|schemeId=${c[0]}|name=${c[1]}|reason=${c[2]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,180 @@
/**
 * Screen Scheme Housekeeping (Cloud) via Issue Type Screen Scheme mappings.
 * -------------------------------------------------------------------------
 * Delete rule: a screen scheme is only a candidate if it is referenced by NO
 * issue type screen scheme mapping anywhere in the instance. (Jira itself also
 * refuses to delete a screen scheme that is still referenced, so a wrong
 * candidate fails loudly rather than being deleted.)
 *
 * FIX: the previous version collected mappings only from the issue type screen
 * schemes of a hard-coded six-project list, so screen schemes referenced
 * exclusively by OTHER projects were wrongly reported (and delete-attempted)
 * as candidates — contradicting the stated rule. We now enumerate ALL issue
 * type screen schemes via the paginated list endpoint.
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_SCHEME_IDS = [
    // "1" // e.g. hard-protect the Default Screen Scheme
]
// Case-insensitive name fragments; any match protects the scheme.
def PROTECTED_NAME_PATTERNS = [
    "default",
    "system",
    "jira"
]
def DRY_RUN = true
def ITSCS_PAGE_SIZE = 50
def SCREEN_SCHEME_PAGE_SIZE = 100
def MAPPING_PAGE_SIZE = 100
logger.info("=== Screen Scheme Housekeeping (via IssueTypeScreenScheme mappings) ===")
logger.info("Protected scheme IDs: ${PROTECTED_SCHEME_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
def isNameProtected = { String name ->
    def n = (name ?: "").toLowerCase()
    PROTECTED_NAME_PATTERNS.any { p -> n.contains((p ?: "").toLowerCase()) }
}
def isIdProtected = { String id ->
    PROTECTED_SCHEME_IDS.any { it?.toString() == id?.toString() }
}
/**
 * 1) Collect ALL issue type screen scheme ids (paged).
 *    GET /rest/api/3/issuetypescreenscheme
 */
def issueTypeScreenSchemeIds = [] as Set
def startAt = 0
while (true) {
    def resp = get("/rest/api/3/issuetypescreenscheme?startAt=${startAt}&maxResults=${ITSCS_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|ITSCS_LIST_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    values.each { s ->
        def itscsId = s?.id?.toString()
        if (itscsId) issueTypeScreenSchemeIds << itscsId
    }
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += ITSCS_PAGE_SIZE
}
logger.info("INFO|ISSUETYPE_SCREENSCHEME_IDS|count=${issueTypeScreenSchemeIds.size()}|ids=${issueTypeScreenSchemeIds}")
/**
 * 2) For each issue type screen scheme: fetch mappings (paged) and collect all
 *    referenced screenSchemeIds.
 *    GET /rest/api/3/issuetypescreenscheme/mapping?issueTypeScreenSchemeId=...
 */
def referencedScreenSchemeIds = [] as Set
issueTypeScreenSchemeIds.each { itscsId ->
    def mapStartAt = 0
    while (true) {
        def resp = get("/rest/api/3/issuetypescreenscheme/mapping?issueTypeScreenSchemeId=${itscsId}&startAt=${mapStartAt}&maxResults=${MAPPING_PAGE_SIZE}")
            .asObject(Map)
        if (resp.status != 200) {
            logger.warn("WARN|ITSCS_MAPPING_FAILED|itscsId=${itscsId}|status=${resp.status}")
            break
        }
        def values = resp.body?.values ?: []
        values.each { m ->
            def screenSchemeId = m?.screenSchemeId?.toString()
            if (screenSchemeId) referencedScreenSchemeIds << screenSchemeId
        }
        def isLast = resp.body?.isLast
        if (isLast == true || values.isEmpty()) break
        mapStartAt += MAPPING_PAGE_SIZE
    }
}
logger.info("INFO|REFERENCED_SCREENSCHEME_IDS|count=${referencedScreenSchemeIds.size()}")
/**
 * 3) Fetch all screen schemes (paged), then determine candidates.
 */
def allScreenSchemes = []
startAt = 0
while (true) {
    def resp = get("/rest/api/3/screenscheme?startAt=${startAt}&maxResults=${SCREEN_SCHEME_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|SCREENSCHEME_LIST_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    allScreenSchemes.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += SCREEN_SCHEME_PAGE_SIZE
}
logger.info("INFO|TOTAL_SCREEN_SCHEMES|${allScreenSchemes.size()}")
def keptReferenced = 0
def keptProtected = 0
def candidates = [] // [id,name,reason]
allScreenSchemes.each { ss ->
    def id = ss?.id?.toString()
    def name = ss?.name?.toString()
    def nameProtected = isNameProtected(name)
    def idProtected = isIdProtected(id)
    // Rule-based protection always wins.
    if (nameProtected || idProtected) {
        keptProtected++
        def why = []
        if (idProtected) why << "ID_PROTECTED"
        if (nameProtected) why << "NAME_PROTECTED"
        logger.info("KEEP|screenSchemeId=${id}|name=${name}|reason=${why}")
        return
    }
    // Referenced by any issue type screen scheme: keep.
    if (referencedScreenSchemeIds.contains(id)) {
        keptReferenced++
        logger.info("KEEP|screenSchemeId=${id}|name=${name}|reason=REFERENCED_BY_ISSUETYPE_SCREENSCHEME")
        return
    }
    // Referenced nowhere: delete candidate.
    logger.info("DEL?|screenSchemeId=${id}|name=${name}|reason=NOT_REFERENCED_ANYWHERE")
    candidates << [id, name, "NOT_REFERENCED_ANYWHERE"]
    if (!DRY_RUN) {
        def delResp = delete("/rest/api/3/screenscheme/${id}").asString()
        if (delResp.status == 204) {
            logger.info("DEL|OK|screenSchemeId=${id}|name=${name}")
        } else {
            logger.error("DEL|FAIL|screenSchemeId=${id}|name=${name}|status=${delResp.status}|body=${delResp.body}")
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total screen schemes: ${allScreenSchemes.size()}")
logger.info("Kept (referenced): ${keptReferenced}")
logger.info("Kept (protected by rules): ${keptProtected}")
logger.info("Delete candidates: ${candidates.size()}")
candidates.each { c ->
    logger.info("CANDIDATE|screenSchemeId=${c[0]}|name=${c[1]}|reason=${c[2]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,183 @@
/**
 * Workflow Scheme Housekeeping (Cloud) — report + optional delete.
 * ----------------------------------------------------------------
 * Delete rule:
 *  - A scheme may only be deleted if it is associated with NO project.
 *  - Extra protection via PROTECTED_SCHEME_IDS and PROTECTED_NAME_PATTERNS.
 *
 * Mapping:
 *  - Fetch all projects (id + key).
 *  - Then: GET /rest/api/3/workflowscheme/project?projectId=...
 *    -> returns the workflow scheme association for that project.
 *
 * Relies on the ScriptRunner-provided `get`/`delete` HTTP helpers and `logger`.
 */
def PROTECTED_SCHEME_IDS = [
    // "10000"
]
// Case-insensitive name fragments; any match protects the scheme.
def PROTECTED_NAME_PATTERNS = [
    "default",
    "classic"
]
def DRY_RUN = true
def PROJECT_PAGE_SIZE = 50
def SCHEME_PAGE_SIZE = 100
logger.info("=== Workflow Scheme Housekeeping ===")
logger.info("Protected scheme IDs: ${PROTECTED_SCHEME_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// Name-based protection (case-insensitive substring match).
def isNameProtected = { String name ->
    def n = (name ?: "").toLowerCase()
    return PROTECTED_NAME_PATTERNS.any { p -> n.contains((p ?: "").toLowerCase()) }
}
// Id-based protection (exact string match).
def isIdProtected = { String id ->
    return PROTECTED_SCHEME_IDS.any { it?.toString() == id?.toString() }
}
/**
 * 1) Fetch all projects (id + key), paging through project search.
 */
def projects = []
def startAt = 0
while (true) {
    def resp = get("/rest/api/3/project/search?startAt=${startAt}&maxResults=${PROJECT_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|PROJECT_SEARCH_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    projects.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += PROJECT_PAGE_SIZE
}
logger.info("INFO|TOTAL_PROJECTS|${projects.size()}")
def projectIdToKey = [:]
projects.each { p ->
    def pid = p?.id?.toString()
    def pkey = p?.key?.toString()
    if (pid && pkey) projectIdToKey[pid] = pkey
}
/**
 * 2) Mapping: workflowSchemeId -> [projectIds]
 *    REST: GET /rest/api/3/workflowscheme/project?projectId={projectId}
 */
def schemeToProjectIds = [:].withDefault { [] }
projectIdToKey.keySet().each { pid ->
    def resp = get("/rest/api/3/workflowscheme/project?projectId=${pid}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.warn("WARN|WFSCHEME_LOOKUP_FAILED|projectId=${pid}|status=${resp.status}")
        return
    }
    // Response shape: values: [ { workflowScheme: {id,name,...}, projectIds:[...] }, ... ]
    def values = resp.body?.values ?: []
    values.each { row ->
        def schemeId = row?.workflowScheme?.id?.toString()
        def pids = (row?.projectIds ?: []).collect { it?.toString() }.findAll { it != null }
        if (schemeId) {
            // Merge defensively even though pids normally holds just this pid.
            schemeToProjectIds[schemeId] = (schemeToProjectIds[schemeId] + pids).unique()
        }
    }
}
/**
 * 3) Fetch all workflow schemes (paged).
 *    REST: GET /rest/api/3/workflowscheme
 */
def schemes = []
startAt = 0
while (true) {
    def resp = get("/rest/api/3/workflowscheme?startAt=${startAt}&maxResults=${SCHEME_PAGE_SIZE}")
        .asObject(Map)
    if (resp.status != 200) {
        logger.error("ERROR|WFSCHEME_LIST_FAILED|status=${resp.status}|body=${resp.body}")
        break
    }
    def values = resp.body?.values ?: []
    schemes.addAll(values)
    def isLast = resp.body?.isLast
    if (isLast == true || values.isEmpty()) break
    startAt += SCHEME_PAGE_SIZE
}
logger.info("INFO|TOTAL_WORKFLOW_SCHEMES|${schemes.size()}")
/**
 * 4) Evaluation + optional delete (when DRY_RUN is false).
 */
def keptAssociated = 0
def keptProtected = 0
def candidates = [] // [id,name,reason]
schemes.each { s ->
    def schemeId = s?.id?.toString()
    def schemeName = s?.name?.toString()
    def assocProjectIds = schemeToProjectIds[schemeId] ?: []
    def assocProjectKeys = assocProjectIds.collect { projectIdToKey[it] }.findAll { it != null }.unique()
    def nameProtected = isNameProtected(schemeName)
    def idProtected = isIdProtected(schemeId)
    // Rule-based protection always wins.
    if (idProtected || nameProtected) {
        keptProtected++
        def why = []
        if (idProtected) why << "ID_PROTECTED"
        if (nameProtected) why << "NAME_PROTECTED"
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|reason=${why}")
        return
    }
    // Associated anywhere: do not delete.
    if (!assocProjectIds.isEmpty()) {
        keptAssociated++
        logger.info("KEEP|schemeId=${schemeId}|name=${schemeName}|assocProjects=${assocProjectKeys}|reason=ASSOCIATED_TO_PROJECTS")
        return
    }
    // Truly unassociated: delete candidate.
    logger.info("DEL?|schemeId=${schemeId}|name=${schemeName}|assocProjects=[]|reason=UNASSOCIATED")
    candidates << [schemeId, schemeName, "UNASSOCIATED"]
    if (!DRY_RUN) {
        def delResp = delete("/rest/api/3/workflowscheme/${schemeId}")
            .asString()
        if (delResp.status == 204) {
            logger.info("DEL|OK|schemeId=${schemeId}|name=${schemeName}")
        } else {
            logger.error("DEL|FAIL|schemeId=${schemeId}|name=${schemeName}|status=${delResp.status}|body=${delResp.body}")
        }
    }
}
logger.info("=== SUMMARY ===")
logger.info("Total workflow schemes: ${schemes.size()}")
logger.info("Kept (associated to any projects): ${keptAssociated}")
logger.info("Kept (protected by rules): ${keptProtected}")
logger.info("Delete candidates: ${candidates.size()}")
candidates.each { c ->
    logger.info("CANDIDATE|schemeId=${c[0]}|name=${c[1]}|reason=${c[2]}")
}
logger.info("=== DONE ===")

View File

@ -0,0 +1,163 @@
import java.net.URLEncoder
/**
 * Workflow Housekeeping (Cloud) - FIX: parse workflowSchemes.values correctly
 * ----------------------------------------------------------------------------
 * Search: GET /rest/api/3/workflow/search -> values[].id.{name,entityId}
 * Usage:  GET /rest/api/3/workflow/{workflowId}/workflowSchemes
 * Delete: DEL /rest/api/3/workflow/{workflowId}
 *
 * IMPORTANT: the usage response nests its page under workflowSchemes.values
 * (NOT directly under body.values).
 */
def DRY_RUN = true // <<< set to false for a real ("hard delete") run
// Workflow entityIds that must never be deleted.
def PROTECTED_WORKFLOW_ENTITY_IDS = [
// z.B. "ec4480b2-623a-4b9b-78c0-2af0d15196ff" // classic default workflow
]
// Case-insensitive name substrings; matching workflows are never deleted.
def PROTECTED_NAME_PATTERNS = [
"classic"
]
def PAGE_SIZE = 50
def USAGE_PAGE_SIZE = 50
logger.info("=== Workflow Housekeeping (FIX usage parsing) ===")
logger.info("Protected workflow entityIds: ${PROTECTED_WORKFLOW_ENTITY_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
logger.info("DRY_RUN: ${DRY_RUN}")
// True when the workflow name contains any protected pattern (case-insensitive).
def isNameProtected = { String name ->
String haystack = (name ?: "").toLowerCase()
PROTECTED_NAME_PATTERNS.any { pattern ->
haystack.contains((pattern ?: "").toLowerCase())
}
}
// True when the entityId appears on the protected allow-list.
def isEntityIdProtected = { String entityId ->
def needle = entityId?.toString()
PROTECTED_WORKFLOW_ENTITY_IDS.any { protectedId -> protectedId?.toString() == needle }
}
// URL-encode a path segment; spaces become %20 (not '+') so names like
// "Builds Workflow" remain valid inside a URI path.
def encPath = { String segment ->
def encoded = URLEncoder.encode(segment ?: "", "UTF-8")
encoded.replace("+", "%20")
}
/**
 * Counts how many workflow schemes reference the given workflow.
 *
 * Reads GET /rest/api/3/workflow/{workflowId}/workflowSchemes and pages via
 * the token under body.workflowSchemes.nextPageToken. NOTE: the page lives
 * under body.workflowSchemes (values/nextPageToken), not directly under body.
 *
 * @return [failed:false, count:n] on success, or
 *         [failed:true, status:..., body:..., count:partial] on HTTP error
 */
def getWorkflowSchemeUsageCount = { String workflowId ->
int total = 0
String pageToken = null
while (true) {
def query = "maxResults=${USAGE_PAGE_SIZE}" +
(pageToken ? "&nextPageToken=${URLEncoder.encode(pageToken, 'UTF-8')}" : "")
def resp = get("/rest/api/3/workflow/${encPath(workflowId)}/workflowSchemes?${query}").asObject(Map)
if (resp.status != 200) {
// Surface the partial count so callers can log what was seen so far.
return [failed: true, status: resp.status, body: resp.body, count: total]
}
def page = (resp.body ?: [:])?.workflowSchemes ?: [:]
total += (page?.values ?: []).size()
pageToken = page?.nextPageToken
if (!pageToken) {
break
}
}
return [failed: false, count: total]
}
// 1) Fetch workflows (payload shape: values[].id.{name,entityId})
def raw = []
def startAt = 0
while (true) {
def resp = get("/rest/api/3/workflow/search?startAt=${startAt}&maxResults=${PAGE_SIZE}")
.asObject(Map)
if (resp.status != 200) {
logger.error("ERROR|WF_SEARCH_FAILED|status=${resp.status}|body=${resp.body}")
return
}
def values = resp.body?.values ?: []
raw.addAll(values)
def isLast = resp.body?.isLast
if (isLast == true || values.isEmpty()) break
startAt += PAGE_SIZE
}
logger.info("INFO|TOTAL_ITEMS_FROM_SEARCH|${raw.size()}")
// Normalize: keep only entries that expose both a name and an entityId.
def workflows = raw.collect { wf ->
def idObj = wf?.id
def name = (idObj instanceof Map) ? idObj?.name?.toString() : null
def entityId = (idObj instanceof Map) ? idObj?.entityId?.toString() : null
return [name: name, entityId: entityId]
}.findAll { it.entityId && it.name }
logger.info("INFO|REAL_WORKFLOWS|${workflows.size()}")
logger.info("INFO|SKIPPED_ITEMS_NO_NAME_OR_ENTITYID|${raw.size() - workflows.size()}")
// 2) Evaluation + optional delete
def deleted = 0
def keptProtected = 0
def keptUsed = 0
def keptUsageLookupFailed = 0
def deleteFailures = 0
def candidates = 0
workflows.each { wf ->
def name = wf.name
def entityId = wf.entityId
// Protected workflows are never considered for deletion.
if (isEntityIdProtected(entityId) || isNameProtected(name)) {
keptProtected++
logger.info("KEEP|entityId=${entityId}|name=${name}|reason=PROTECTED")
return
}
def usage = getWorkflowSchemeUsageCount(entityId)
// If the usage lookup failed, err on the safe side and keep the workflow.
if (usage.failed) {
keptUsageLookupFailed++
logger.info("KEEP|entityId=${entityId}|name=${name}|reason=USAGE_LOOKUP_FAILED|status=${usage.status}")
return
}
if (usage.count > 0) {
keptUsed++
logger.info("KEEP|entityId=${entityId}|name=${name}|reason=USED_BY_SCHEMES|usedBySchemes=${usage.count}")
return
}
candidates++
logger.info("DEL?|entityId=${entityId}|name=${name}|reason=UNUSED_NO_SCHEME_ASSOC")
if (!DRY_RUN) {
def delResp = delete("/rest/api/3/workflow/${encPath(entityId)}").asString()
if (delResp.status == 204) {
deleted++
logger.info("DEL|OK|entityId=${entityId}|name=${name}")
} else {
deleteFailures++
logger.error("DEL|FAIL|entityId=${entityId}|name=${name}|status=${delResp.status}|body=${delResp.body}")
}
}
}
logger.info("=== SUMMARY ===")
logger.info("Real workflows processed: ${workflows.size()}")
logger.info("Delete candidates: ${candidates}")
logger.info("Deleted: ${deleted}")
logger.info("Kept (protected): ${keptProtected}")
logger.info("Kept (used by schemes): ${keptUsed}")
logger.info("Kept (usage lookup failed): ${keptUsageLookupFailed}")
logger.info("Delete failures: ${deleteFailures}")
logger.info("=== DONE ===")

View File

@ -0,0 +1,222 @@
import java.net.URLEncoder
/**
 * Screens Housekeeping (Cloud) - candidate list
 * -----------------------------------------------------------
 * Goal: find screens that are not referenced by any screen scheme.
 *
 * Notes:
 * - Jira refuses a delete when the screen is used in a screen scheme,
 *   a workflow or a workflow draft.
 * - Workflow/draft usage cannot be listed reliably via REST, so this script
 *   only checks "not in screen schemes"; a real delete is still blocked by
 *   Jira if a workflow/draft references the screen.
 ************************************************************************************************************************
 * !!!! THERE IS STILL A RISK OF DELETING SCREENS THAT ARE NEEDED FOR TRANSITIONS !!!!
 * !!!! USE THIS SCRIPT WITH EXTREME CARE !!!!
 ************************************************************************************************************************
 */
def DRY_RUN = true
// Screen IDs that must never be deleted.
def PROTECTED_SCREEN_IDS = [
// "1"
]
// Case-insensitive name substrings; matching screens are never deleted.
def PROTECTED_NAME_PATTERNS = [
"default",
"NICS: ",
"NIN: ",
"NINPDS",
"CS: ",
"PDS:"
]
def PAGE_SIZE = 100
logger.info("=== Screens Housekeeping ===")
logger.info("DRY_RUN: ${DRY_RUN}")
logger.info("Protected screen IDs: ${PROTECTED_SCREEN_IDS}")
logger.info("Protected name patterns: ${PROTECTED_NAME_PATTERNS}")
// Case-insensitive substring match against the protected name patterns.
def isNameProtected = { String name ->
def lowered = (name ?: "").toLowerCase()
PROTECTED_NAME_PATTERNS.any { candidate -> lowered.contains((candidate ?: "").toLowerCase()) }
}
// True when the screen id is explicitly protected.
def isIdProtected = { String id ->
def wanted = id?.toString()
PROTECTED_SCREEN_IDS.any { candidate -> candidate?.toString() == wanted }
}
// URL-encode a path segment (ids are numeric, but better safe than sorry);
// '+' is rewritten to %20 because '+' is only valid in query strings.
def encPath = { String raw ->
URLEncoder.encode(raw ?: "", "UTF-8").replace("+", "%20")
}
/**
 * Recursively collects "screen id" values from a parsed JSON structure.
 * Many Jira responses expose e.g. defaultScreenId / screenId / createScreenId,
 * so we conservatively collect every key containing both "screen" and "id".
 * Results are appended to `out` as strings.
 */
def collectScreenIdsRecursive
collectScreenIdsRecursive = { Object node, Set<String> out ->
if (node == null) return
if (node instanceof Map) {
(node as Map).each { k, v ->
def key = k?.toString()?.toLowerCase()
if (key && key.contains("screen") && key.contains("id")) {
if (v != null) out << v.toString()
}
// descend into nested values as well
collectScreenIdsRecursive(v, out)
}
} else if (node instanceof List) {
(node as List).each { item -> collectScreenIdsRecursive(item, out) }
}
}
/**
 * 1) Fetch all screens.
 * API: GET /rest/api/3/screens (plural).
 */
def screens = []
def startAt = 0
while (true) {
def resp = get("/rest/api/3/screens?startAt=${startAt}&maxResults=${PAGE_SIZE}").asObject(Map)
if (resp.status != 200) {
logger.error("ERROR|SCREENS_LIST_FAILED|status=${resp.status}|body=${resp.body}")
return
}
def values = resp.body?.values ?: []
screens.addAll(values)
def isLast = resp.body?.isLast
if (isLast == true || values.isEmpty()) break
startAt += PAGE_SIZE
}
logger.info("INFO|TOTAL_SCREENS|${screens.size()}")
/**
 * 2) Fetch all screen schemes.
 * API: GET /rest/api/3/screenscheme
 */
def screenSchemes = []
startAt = 0
while (true) {
def resp = get("/rest/api/3/screenscheme?startAt=${startAt}&maxResults=${PAGE_SIZE}").asObject(Map)
if (resp.status != 200) {
logger.error("ERROR|SCREENSCHEME_LIST_FAILED|status=${resp.status}|body=${resp.body}")
return
}
def values = resp.body?.values ?: []
screenSchemes.addAll(values)
def isLast = resp.body?.isLast
if (isLast == true || values.isEmpty()) break
startAt += PAGE_SIZE
}
logger.info("INFO|TOTAL_SCREEN_SCHEMES|${screenSchemes.size()}")
/**
 * 3) Reference mapping: screenId -> [schemeNames...]
 * Fetch each scheme's details and collect every contained screen id.
 */
def usedBySchemeNames = [:].withDefault { [] as List<String> } // screenId -> schemeNames
def usedScreenIds = new HashSet<String>()
screenSchemes.each { ss ->
def ssId = ss?.id?.toString()
def ssName = ss?.name?.toString() ?: "?"
if (!ssId) return
def resp = get("/rest/api/3/screenscheme/${encPath(ssId)}").asObject(Map)
if (resp.status != 200) {
logger.warn("WARN|SCREENSCHEME_DETAILS_FAILED|schemeId=${ssId}|name=${ssName}|status=${resp.status}")
return
}
def found = new HashSet<String>()
collectScreenIdsRecursive(resp.body, found)
found.each { sid ->
usedScreenIds << sid
usedBySchemeNames[sid] = (usedBySchemeNames[sid] + ssName).unique()
}
}
logger.info("INFO|USED_SCREEN_IDS|${usedScreenIds.size()}")
/**
 * 4) Evaluation.
 * Candidate = screen referenced by no screen scheme and not protected.
 */
def candidates = []
def keptProtected = 0
def keptUsed = 0
screens.each { s ->
def screenId = s?.id?.toString()
def screenName = s?.name?.toString()
if (!screenId) return
if (isIdProtected(screenId) || isNameProtected(screenName)) {
keptProtected++
logger.info("KEEP|screenId=${screenId}|name=${screenName}|reason=PROTECTED")
return
}
def schemes = usedBySchemeNames[screenId] ?: []
if (!schemes.isEmpty()) {
keptUsed++
// keep the log line short: show at most 5 scheme names
def preview = schemes.take(5)
logger.info("KEEP|screenId=${screenId}|name=${screenName}|reason=USED_BY_SCREEN_SCHEMES|count=${schemes.size()}|examples=${preview}")
return
}
// clean candidate (with respect to screen schemes only)
candidates << [id: screenId, name: screenName]
}
/**
 * 5) Clean candidate list (dry run) - one line per screen, easy to copy/paste.
 */
candidates = candidates.sort { (it.name ?: "") as String }
logger.info("=== CANDIDATES (NOT IN ANY SCREEN SCHEME) ===")
candidates.each { c ->
logger.info("CANDIDATE|screenId=${c.id}|name=${c.name}")
}
logger.info("=== SUMMARY ===")
logger.info("Total screens: ${screens.size()}")
logger.info("Kept (protected): ${keptProtected}")
logger.info("Kept (used by screen schemes): ${keptUsed}")
logger.info("Candidates (no screen scheme refs): ${candidates.size()}")
/**
 * 6) Optional delete.
 * API: DELETE /rest/api/3/screens/{screenId}
 * Jira will still block the delete if a workflow/draft references the screen.
 */
if (!DRY_RUN) {
logger.info("=== DELETE PHASE ===")
def deleted = 0
def failed = 0
candidates.each { c ->
def delResp = delete("/rest/api/3/screens/${encPath(c.id)}").asString()
if (delResp.status == 204) {
deleted++
logger.info("DEL|OK|screenId=${c.id}|name=${c.name}")
} else {
failed++
logger.error("DEL|FAIL|screenId=${c.id}|name=${c.name}|status=${delResp.status}|body=${delResp.body}")
}
}
logger.info("=== DELETE SUMMARY ===")
logger.info("Deleted: ${deleted}")
logger.info("Delete failures: ${failed}")
}
logger.info("=== DONE ===")

View File

@ -1,115 +0,0 @@
import groovy.json.JsonOutput
// ------------------ Config ------------------
boolean DRY_RUN = true
int PAGE_SIZE = 50
Set<String> EXCLUDE_BY_NAME = [] as Set // optionally protect schemes by name
// ------------------ Logging -----------------
// Fall back to println when no `logger` binding is available (e.g. local run).
void logInfo(String m){ try{ logger.info(m) }catch(e){ println m } }
void logWarn(String m){ try{ logger.warn(m) }catch(e){ println "WARN: " + m } }
void logErr (String m){ try{ logger.error(m)}catch(e){ println "ERR: " + m } }
// ------------------ HTTP Helpers -----------
/** GETs a REST path with optional query params; returns the JSON body as a Map, throws on non-200. */
Map getAsMap(String path, Map q=[:]) {
def request = get(path)
q.each { name, value -> request = request.queryString(name, value) }
def response = request.asObject(Map)
if (response.status != 200) {
throw new RuntimeException("GET " + path + " failed: HTTP " + response.status + " :: " + response.body)
}
return (response.body ?: [:]) as Map
}
/**
 * Collects all `values` entries from a paginated Jira REST endpoint.
 * Pages advance by the server-reported maxResults and stop when the reported
 * total is reached or a page comes back empty.
 */
List<Map> pagedGetValues(String path, int pageSize=50) {
List<Map> collected = []
int offset = 0
while (true) {
Map page = getAsMap(path, [startAt: offset, maxResults: pageSize])
List entries = (page.values ?: []) as List
collected.addAll(entries as List<Map>)
int reportedTotal = (page.total ?: (offset + entries.size())) as int
int step = (page.maxResults ?: entries.size()) as int
if (entries.isEmpty() || offset + step >= reportedTotal) break
offset += step
}
return collected
}
// ------------------ Fetchers ----------------
// Loads all issue type screen schemes (paginated).
List<Map> fetchITSS(int pageSize){
logInfo("Lade Issue Type Screen Schemes…")
def list = pagedGetValues("/rest/api/3/issuetypescreenscheme", pageSize)
logInfo("ITSS gefunden: " + list.size())
list
}
// Loads all projects (paginated).
List<Map> fetchProjects(int pageSize){
logInfo("Lade Projekte…")
def list = pagedGetValues("/rest/api/3/project/search", pageSize)
logInfo("Projekte gefunden: " + list.size())
list
}
Long fetchITSSForProject(Long projectId){
// Returns the ITSS id assigned to the given project (null when absent)
def m = getAsMap("/rest/api/3/project/${projectId}/issuetypescreenscheme")
def id = m.get("issueTypeScreenSchemeId")
return (id == null ? null : Long.valueOf(id.toString()))
}
// ------------------ Delete ------------------
// Deletes one ITSS; true on HTTP 200/204, false (with warn log) otherwise.
boolean deleteITSS(long id, String name){
def resp = delete("/rest/api/3/issuetypescreenscheme/${id}").asString()
if (resp.status in [200,204]) { logInfo("Gelöscht: [${id}] ${name}"); return true }
logWarn("Nicht gelöscht [${id}] ${name} :: HTTP ${resp.status} :: ${resp.body}")
false
}
// ------------------ Main --------------------
// Finds issue type screen schemes referenced by no project and (optionally,
// when dryRun is false) deletes them. Name matches in excludeByName are kept.
void runITSSCleanup(boolean dryRun, int pageSize, Set<String> excludeByName){
def itss = fetchITSS(pageSize)
if (itss.isEmpty()){ logInfo("Keine ITSS vorhanden nichts zu tun."); return }
// index all schemes by numeric id
Map<Long,Map> itssById = [:]
itss.each{ Map x -> if (x.id!=null) itssById[Long.valueOf(x.id.toString())] = x }
def projects = fetchProjects(pageSize)
// collect the ITSS ids actually referenced by any project
Set<Long> referenced = new LinkedHashSet<>()
projects.each{ Map p ->
def pid = p.get("id"); if (pid==null) return
try {
Long ref = fetchITSSForProject(Long.valueOf(pid.toString()))
if (ref!=null) referenced << ref
} catch (Exception ex) {
logWarn("ITSS-Mapping für Projekt ${p.key ?: pid} nicht lesbar: " + ex.message)
}
}
logInfo("Referenzierte ITSS gesamt: " + referenced.size())
// candidate = not referenced by any project and not excluded by name
List<Map> candidates = []
itssById.each{ Long id, Map row ->
String name = (row.name ?: "") as String
if (!referenced.contains(id) && !excludeByName.contains(name)){
candidates << [id:id, name:name, description:(row.description ?: "")]
}
}
candidates.sort{ a,b -> a.name <=> b.name }
if (candidates.isEmpty()){ logInfo("Keine ungenutzten ITSS gefunden. ✅"); return }
logWarn("Ungenutzte ITSS (${candidates.size()}):")
candidates.each{ c -> logWarn(" - [${c.id}] ${c.name}") }
if (dryRun){
logInfo("Dry-Run aktiv → nichts gelöscht.")
logInfo("JSON:\n" + JsonOutput.prettyPrint(JsonOutput.toJson(candidates)))
return
}
int deleted=0, skipped=0
candidates.each{ c ->
try {
if (deleteITSS((c.id as Long), c.name.toString())) deleted++ else skipped++
} catch (Exception ex){
skipped++; logErr("Fehler beim Löschen [${c.id}] ${c.name} :: " + ex.message)
}
}
logWarn("Fertig. Ergebnis: deleted=${deleted}, skipped=${skipped}")
}
// ---- Start ----
runITSSCleanup(DRY_RUN, PAGE_SIZE, EXCLUDE_BY_NAME)

View File

@ -1,224 +0,0 @@
// -----------------------------------------------------------------------------
// Housekeeping: delete inactive workflow schemes (without project association)
// Jira Cloud - ScriptRunner Console
// -----------------------------------------------------------------------------
import groovy.json.JsonOutput
// --- Configuration -----------------------------------------------------------
// false -> inactive workflow schemes (except EXCLUDED_IDS) are DELETED
// true  -> dry run only, nothing is deleted
final boolean DRY_RUN = true
// Workflow scheme IDs that must NEVER be deleted
// (e.g. the default system scheme; adjust/extend the id as needed)
final Set<Long> EXCLUDED_IDS = [10000L] as Set
// --- Step 1: load all workflow schemes (paginated) ---------------------------
List<Map> allSchemes = []
int startAt = 0
int maxResults = 50
boolean finished = false
while (!finished) {
def resp = get("/rest/api/3/workflowscheme?startAt=${startAt}&maxResults=${maxResults}")
.asObject(Map)
if (resp.status != 200) {
logger.error("Konnte Workflow Schemes nicht laden (startAt=${startAt}): ${resp.status} - ${resp.body}")
return "Fehler beim Laden der Workflow Schemes. Siehe Log."
}
def body = resp.body ?: [:]
def values = (body.values ?: []) as List<Map>
allSchemes.addAll(values)
boolean isLast = (body.isLast == true)
int total = (body.total ?: (startAt + values.size())) as int
logger.info "Workflow Schemes geladen: ${allSchemes.size()} (total ~ ${total}), isLast=${isLast}"
if (isLast || values.isEmpty()) {
finished = true
} else {
startAt += maxResults
if (startAt >= total) {
finished = true
}
}
}
logger.info "Anzahl Workflow Schemes insgesamt: ${allSchemes.size()}"
// Map: schemeId -> [scheme: <object>, used: boolean, projects: Set<projectKey>]
def schemeUsage = allSchemes.collectEntries { scheme ->
Long id = (scheme.id as Long)
[
(id): [
scheme : scheme,
used : false,
projects: [] as Set<String>
]
]
}
// --- Step 2: load workflow scheme <-> project mappings -----------------------
//
// GET /rest/api/3/workflowscheme/project
// returns a PageBean with values[ { workflowSchemeId, projectId, projectKey, ... } ]
// NOTE(review): Atlassian documents this endpoint as requiring a projectId
// query parameter — confirm it actually returns all mappings without one.
List<Map> allMappings = []
startAt = 0
finished = false
while (!finished) {
def resp = get("/rest/api/3/workflowscheme/project?startAt=${startAt}&maxResults=${maxResults}")
.asObject(Map)
if (resp.status != 200) {
logger.error("Konnte Workflow-Scheme-Mappings nicht laden (startAt=${startAt}): ${resp.status} - ${resp.body}")
break
}
def body = resp.body ?: [:]
def values = (body.values ?: []) as List<Map>
allMappings.addAll(values)
boolean isLast = (body.isLast == true)
int total = (body.total ?: (startAt + values.size())) as int
logger.info "Workflow-Scheme-Mappings geladen: ${allMappings.size()} (total ~ ${total}), isLast=${isLast}"
if (isLast || values.isEmpty()) {
finished = true
} else {
startAt += maxResults
if (startAt >= total) {
finished = true
}
}
}
// Record the mappings in schemeUsage
allMappings.each { m ->
Long schemeId = (m.workflowSchemeId as Long)
String projKey = m.projectKey?.toString()
def entry = schemeUsage[schemeId]
if (entry) {
entry.used = true
if (projKey) {
entry.projects << projKey
}
} else {
logger.warn "Mapping gefunden für Workflow Scheme ID=${schemeId}, das nicht in allSchemes war. Projekt=${projKey}"
}
}
def projectsWithWorkflowScheme = allMappings.collect { it.projectKey }.findAll { it }.toSet()
logger.info "Anzahl Projekte mit Workflow Scheme: ${projectsWithWorkflowScheme.size()}"
// --- Step 3: inactive (unused & not excluded) schemes ------------------------
def inactive = schemeUsage.values()
.findAll { entry ->
Long id = (entry.scheme.id as Long)
!entry.used && !EXCLUDED_IDS.contains(id)
}
.sort { it.scheme.name?.toString()?.toLowerCase() }
logger.info "Ausgeschlossene Workflow-Scheme-IDs : ${EXCLUDED_IDS.join(', ')}"
logger.info "Inaktive Workflow Schemes (Kandidaten): ${inactive.size()}"
// --- Step 4: optional delete -------------------------------------------------
List<Map> deleted = []
List<Map> failed = []
if (!DRY_RUN) {
inactive.each { entry ->
def s = entry.scheme
Long id = (s.id as Long)
logger.info "Lösche Workflow Scheme ID=${id}, Name=\"${s.name}\" ..."
def delResp = delete("/rest/api/3/workflowscheme/${id}")
.asString()
if (delResp.status in [200, 204]) {
logger.info "Erfolgreich gelöscht: ID=${id}, Name=\"${s.name}\""
deleted << [
id : id,
name : s.name,
description: s.description
]
} else {
logger.warn "Löschen fehlgeschlagen für ID=${id}, Name=\"${s.name}\": Status=${delResp.status}, Body=${delResp.body}"
failed << [
id : id,
name : s.name,
status : delResp.status,
body : delResp.body
]
}
}
} else {
logger.info "DRY_RUN = true -> Es wird NICHT gelöscht, nur Kandidaten ermittelt."
}
// --- Step 5: summary (human-readable lines + JSON dump) ----------------------
def lines = []
lines << "=== Workflow Schemes Housekeeping ==="
lines << "DRY_RUN : ${DRY_RUN}"
lines << "Gesamt Workflow Schemes : ${allSchemes.size()}"
lines << "Projekte mit Scheme-Mapping : ${projectsWithWorkflowScheme.size()}"
lines << "Ausgeschlossene IDs : ${EXCLUDED_IDS.join(', ')}"
lines << "Inaktive Kandidaten : ${inactive.size()}"
if (!DRY_RUN) {
lines << "Gelöscht : ${deleted.size()}"
lines << "Fehlgeschlagen : ${failed.size()}"
}
lines << ""
lines << "Inaktive (unbenutzte) Schemes, exkl. EXCLUDED_IDS:"
inactive.each { entry ->
def s = entry.scheme
lines << String.format(
"- ID=%s | Name=\"%s\" | Beschreibung=\"%s\" | Projekte=%s",
s.id,
s.name ?: "",
(s.description ?: "").replaceAll('\\s+', ' ').trim(),
entry.projects ?: []
)
}
def result = [
summary : [
dryRun : DRY_RUN,
totalWorkflowSchemes : allSchemes.size(),
projectsWithMapping : projectsWithWorkflowScheme.size(),
excludedIDs : EXCLUDED_IDS,
inactiveCandidates : inactive.size(),
deleted : deleted.size(),
failed : failed.size()
],
inactiveWorkflowSchemes: inactive.collect { e ->
def s = e.scheme
[
id : s.id,
name : s.name,
description : s.description,
projectsUsing: e.projects
]
},
deletedWorkflowSchemes: deleted,
failedDeletions : failed
]
logger.info lines.join("\n")
return lines.join("\n") + "\n\nJSON:\n" + JsonOutput.prettyPrint(JsonOutput.toJson(result))

View File

@ -1,227 +0,0 @@
// -----------------------------------------------------------------------------
// Housekeeping: find unused notification schemes (project-based)
// Jira Cloud - ScriptRunner Console
// -----------------------------------------------------------------------------
import groovy.json.JsonOutput
// --- Configuration -----------------------------------------------------------
// true  -> dry run only, nothing is deleted
// false -> unused notification schemes (except EXCLUDED_IDS) would be deleted
//          (the delete logic below is prepared but disabled by default)
final boolean DRY_RUN = true
// Notification scheme IDs that must never be deleted
// (e.g. default / system schemes)
final Set<Long> EXCLUDED_IDS = [10000L] as Set
// --- Step 1: load all notification schemes (paginated API) -------------------
List<Map> allSchemes = []
int startAt = 0
int maxResults = 50
boolean finished = false
while (!finished) {
def resp = get("/rest/api/3/notificationscheme?startAt=${startAt}&maxResults=${maxResults}")
.asObject(Map)
if (resp.status != 200) {
logger.error("Konnte Notification Schemes nicht laden (startAt=${startAt}): ${resp.status} - ${resp.body}")
return "Fehler beim Laden der Notification Schemes. Siehe Log."
}
def body = resp.body ?: [:]
def values = (body.values ?: []) as List<Map>
allSchemes.addAll(values)
boolean isLast = (body.isLast == true)
int total = (body.total ?: (startAt + values.size())) as int
logger.info "Notification Schemes geladen: ${allSchemes.size()} (total ~ ${total}), isLast=${isLast}"
if (isLast || values.isEmpty()) {
finished = true
} else {
startAt += maxResults
if (startAt >= total) {
finished = true
}
}
}
logger.info "Anzahl Notification Schemes insgesamt: ${allSchemes.size()}"
// Map: schemeId -> [scheme: <object>, used: boolean, projects: Set<projectKey>]
def schemeUsage = allSchemes.collectEntries { scheme ->
Long id = (scheme.id as Long)
[
(id): [
scheme : scheme,
used : false,
projects: [] as Set<String>
]
]
}
// --- Step 2: load all projects and resolve each project's notification scheme
int totalProjects = 0
startAt = 0
finished = false
while (!finished) {
def projResp = get("/rest/api/3/project/search?startAt=${startAt}&maxResults=${maxResults}")
.asObject(Map)
if (projResp.status != 200) {
logger.error("Konnte Projekte nicht laden (startAt=${startAt}): ${projResp.status} - ${projResp.body}")
break
}
def body = projResp.body ?: [:]
def projects = (body.values ?: []) as List<Map>
totalProjects += projects.size()
logger.info "Verarbeite Projekte ${startAt} bis ${startAt + projects.size() - 1} ..."
projects.each { proj ->
String projectKey = proj.key
String projectId = proj.id?.toString()
// Fetch the notification scheme assigned to this project
def notifResp = get("/rest/api/3/project/${projectId}/notificationscheme")
.asObject(Map)
if (notifResp.status == 200) {
def schemeId = notifResp.body?.id
if (schemeId) {
Long idLong = (schemeId as Long)
def entry = schemeUsage[idLong]
if (entry) {
entry.used = true
entry.projects << projectKey
} else {
// Project uses a scheme that was not in the global list (should be rare)
logger.warn "Projekt ${projectKey} nutzt Notification Scheme ID=${schemeId}, das nicht in der globalen Liste war."
}
}
} else if (notifResp.status == 404) {
// e.g. team-managed projects without a classic notification scheme
logger.debug "Projekt ${projectKey} hat kein klassisches Notification Scheme (404)."
} else {
logger.warn "Konnte Notification Scheme für Projekt ${projectKey} nicht laden: ${notifResp.status} - ${notifResp.body}"
}
}
int total = (body.total ?: totalProjects) as int
startAt += maxResults
if (startAt >= total) {
finished = true
}
}
// --- Step 3: determine unused (and not excluded) schemes ---------------------
def unused = schemeUsage.values()
.findAll { entry ->
Long id = (entry.scheme.id as Long)
!entry.used && !EXCLUDED_IDS.contains(id)
}
.sort { it.scheme.name?.toString()?.toLowerCase() }
logger.info "Projekte insgesamt : ${totalProjects}"
logger.info "Ausgeschlossene Notification-IDs : ${EXCLUDED_IDS.join(', ')}"
logger.info "Unbenutzte Notification Schemes : ${unused.size()}"
// --- Step 4: optional delete (still guarded by DRY_RUN) ----------------------
List<Map> deleted = []
List<Map> failed = []
if (!DRY_RUN) {
unused.each { entry ->
def s = entry.scheme
Long id = (s.id as Long)
logger.info "Lösche Notification Scheme ID=${id}, Name=\"${s.name}\" ..."
def delResp = delete("/rest/api/3/notificationscheme/${id}")
.asString()
if (delResp.status in [200, 204]) {
logger.info "Erfolgreich gelöscht: ID=${id}, Name=\"${s.name}\""
deleted << [
id : id,
name : s.name,
description: s.description
]
} else {
logger.warn "Löschen fehlgeschlagen für ID=${id}, Name=\"${s.name}\": Status=${delResp.status}, Body=${delResp.body}"
failed << [
id : id,
name : s.name,
status : delResp.status,
body : delResp.body
]
}
}
} else {
logger.info "DRY_RUN = true -> Es wird NICHT gelöscht, nur Kandidaten ermittelt."
}
// --- Step 5: summary (human-readable lines + JSON dump) ----------------------
def lines = []
lines << "=== Notification Schemes Housekeeping (projektbasiert) ==="
lines << "DRY_RUN : ${DRY_RUN}"
lines << "Gesamt Notification Schemes : ${allSchemes.size()}"
lines << "Gesamt Projekte : ${totalProjects}"
lines << "Ausgeschlossene IDs : ${EXCLUDED_IDS.join(', ')}"
lines << "Kandidaten (unused) : ${unused.size()}"
if (!DRY_RUN) {
lines << "Gelöscht : ${deleted.size()}"
lines << "Fehlgeschlagen : ${failed.size()}"
}
lines << ""
lines << "Kandidaten (unbenutzte Schemes, exkl. EXCLUDED_IDS):"
unused.each { entry ->
def s = entry.scheme
lines << String.format(
"- ID=%s | Name=\"%s\" | Beschreibung=\"%s\" | Projekte=%s",
s.id,
s.name ?: "",
(s.description ?: "").replaceAll('\\s+', ' ').trim(),
entry.projects ?: []
)
}
def result = [
summary : [
dryRun : DRY_RUN,
totalNotificationSchemes: allSchemes.size(),
totalProjects : totalProjects,
excludedIDs : EXCLUDED_IDS,
candidateUnused : unused.size(),
deleted : deleted.size(),
failed : failed.size()
],
candidateUnusedSchemes: unused.collect { e ->
def s = e.scheme
[
id : s.id,
name : s.name,
description : s.description,
projectsUsing: e.projects
]
},
deletedNotificationSchemes: deleted,
failedDeletions : failed
]
logger.info lines.join("\n")
return lines.join("\n") + "\n\nJSON:\n" + JsonOutput.prettyPrint(JsonOutput.toJson(result))

View File

@ -1,135 +0,0 @@
import groovy.json.JsonOutput
// -------------------------- Configuration --------------------------
boolean DRY_RUN = true // review first; set to false once the candidate list looks right
int PAGE_SIZE = 50
// Scheme names that must never be deleted.
Set<String> EXCLUDE_BY_NAME = [
'Default Screen Scheme'
] as Set
// -------------------------- Logging -------------------------------
// Fall back to println when no `logger` binding is available.
void logInfo(String msg){ try { logger.info(msg) } catch(e){ println msg } }
void logWarn(String msg){ try { logger.warn(msg) } catch(e){ println "WARN: " + msg } }
void logErr (String msg){ try { logger.error(msg)} catch(e){ println "ERR: " + msg } }
// -------------------------- HTTP Helpers --------------------------
/**
 * Performs a GET against the given REST path with optional query parameters.
 * Returns the parsed JSON body as a Map; throws RuntimeException on non-200.
 */
Map getAsMap(String path, Map<String,Object> q=[:]) {
def call = get(path)
q.each { paramName, paramValue -> call = call.queryString(paramName, paramValue) }
def result = call.asObject(Map)
if (result.status != 200) {
throw new RuntimeException("GET " + path + " failed: HTTP " + result.status + " :: " + result.body)
}
return (result.body ?: [:]) as Map
}
/**
 * Collects all `values` entries from a paginated Jira REST endpoint.
 * Stops when a page is empty or the server-reported total is reached.
 */
List<Map> pagedGetValues(String path, int pageSize) {
int startAt = 0
List<Map> all = []
while (true) {
Map body = getAsMap(path, [startAt: startAt, maxResults: pageSize])
List vals = (body.values ?: []) as List
all.addAll(vals as List<Map>)
// total/maxResults may be missing; fall back to what this page delivered
int total = (body.total ?: (startAt + vals.size())) as int
int nextStart = startAt + ((body.maxResults ?: vals.size()) as int)
if (vals.isEmpty() || nextStart >= total) break
startAt = nextStart
}
return all
}
// -------------------------- Fetchers ------------------------------
// Loads all screen schemes (paginated).
List<Map> fetchScreenSchemes(int pageSize) {
logInfo("Lade Screen Schemes…")
List<Map> list = pagedGetValues("/rest/api/3/screenscheme", pageSize)
logInfo("Screen Schemes gefunden: " + list.size())
return list
}
// Loads all issue type screen schemes (paginated).
List<Map> fetchIssueTypeScreenSchemes(int pageSize) {
logInfo("Lade Issue Type Screen Schemes…")
List<Map> list = pagedGetValues("/rest/api/3/issuetypescreenscheme", pageSize)
logInfo("Issue Type Screen Schemes gefunden: " + list.size())
return list
}
/**
 * Collects all references to screen schemes:
 * - defaultScreenSchemeId per ITSS
 * - global issue-type -> screen-scheme mappings from /issuetypescreenscheme/mapping
 */
Set<Long> fetchReferencedScreenSchemeIds(int pageSize, List<Map> itssList) {
Set<Long> refs = new LinkedHashSet<>()
// 1) default assignment per ITSS
for (Map itss : itssList) {
def defId = itss.get("defaultScreenSchemeId")
if (defId != null) refs.add(Long.valueOf(defId.toString()))
}
// 2) global mappings
logInfo("Lade globale IssueType→ScreenScheme Mappings…")
List<Map> globalMaps = pagedGetValues("/rest/api/3/issuetypescreenscheme/mapping", pageSize)
for (Map m : globalMaps) {
def ssId = m.get("screenSchemeId")
if (ssId != null) refs.add(Long.valueOf(ssId.toString()))
}
logInfo("Referenzierte Screen Schemes gesamt: " + refs.size())
return refs
}
// -------------------------- Delete -------------------------------
// Deletes one screen scheme; true on HTTP 200/204, false (with warn log) otherwise.
boolean deleteScreenScheme(long id, String name) {
def resp = delete("/rest/api/3/screenscheme/" + id).asString()
if (resp.status in [200,204]) {
logInfo("Gelöscht: [" + id + "] " + name)
return true
}
logWarn("Nicht gelöscht [" + id + "] " + name + " :: HTTP " + resp.status + " :: " + resp.body)
return false
}
// -------------------------- Main -------------------------------
/**
 * Orchestrates the cleanup: loads all screen schemes, determines which are
 * referenced by issue-type screen schemes, then reports (dry run) or deletes
 * the unreferenced ones.
 *
 * @param dryRun        when true, only report candidates; nothing is deleted
 * @param pageSize      page size for the paged REST lookups
 * @param excludeByName scheme names that must never be deleted
 */
void runCleanup(boolean dryRun, int pageSize, Set<String> excludeByName) {
    List<Map> screenSchemes = fetchScreenSchemes(pageSize)
    if (screenSchemes.isEmpty()) { logInfo("Keine Screen Schemes vorhanden nichts zu tun."); return }
    List<Map> itssList = fetchIssueTypeScreenSchemes(pageSize)
    Set<Long> referenced = fetchReferencedScreenSchemeIds(pageSize, itssList)
    // Candidates = schemes that are neither referenced nor excluded by name.
    List<Map> candidates = []
    for (Map s : screenSchemes) {
        long id = Long.valueOf(s.get("id").toString())
        String name = (s.get("name") ?: "") as String
        if (!referenced.contains(id) && !excludeByName.contains(name)) {
            candidates.add([id: id, name: name, description: (s.get("description") ?: "")])
        }
    }
    candidates.sort { a, b -> a.name <=> b.name }
    if (candidates.isEmpty()) { logInfo("Keine ungenutzten Screen Schemes gefunden. ✅"); return }
    logWarn("Ungenutzte Kandidaten (" + candidates.size() + "):")
    for (Map c : candidates) logWarn(" - [" + c.id + "] " + c.name)
    if (dryRun) {
        // Dry run: report only; the JSON dump makes the list easy to copy out.
        logInfo("Dry-Run aktiv → nichts gelöscht.")
        logInfo("JSON Dump:\n" + JsonOutput.prettyPrint(JsonOutput.toJson(candidates)))
        return
    }
    // Real run: delete each candidate, counting successes and failures.
    int deleted = 0, skipped = 0
    for (Map c : candidates) {
        try {
            if (deleteScreenScheme((c.id as Long), c.name.toString())) deleted++ else skipped++
        } catch (Exception ex) {
            skipped++
            logErr("Fehler beim Löschen [" + c.id + "] " + c.name + " :: " + ex.message)
        }
    }
    logWarn("Fertig. Ergebnis: deleted=" + deleted + ", skipped=" + skipped)
}
// ---- Entry point: run the cleanup with the globally configured settings ----
runCleanup(DRY_RUN, PAGE_SIZE, EXCLUDE_BY_NAME)

View File

@ -1,194 +0,0 @@
// -----------------------------------------------------------------------------
// Housekeeping: find AND delete unused permission schemes
// Jira Cloud - ScriptRunner Console
// -----------------------------------------------------------------------------
import groovy.json.JsonOutput
// --- Configuration -----------------------------------------------------------
// 🔥 true  -> dry run only, nothing is deleted.
// 🔥 false -> unused schemes (except EXCLUDED_IDS) are deleted.
final boolean DRY_RUN = true
// IDs that must never be deleted (e.g. default/system schemes)
final Set<Long> EXCLUDED_IDS = [0] as Set // extend as needed, e.g. 10000L etc.
// --- Step 1: fetch all permission schemes ------------------------------------
def schemesResp = get("/rest/api/3/permissionscheme").asObject(Map)
if (schemesResp.status != 200) {
    logger.error("Konnte Berechtigungsschemata nicht laden: ${schemesResp.status} - ${schemesResp.body}")
    return "Fehler beim Laden der Berechtigungsschemata. Siehe Log."
}
def schemes = schemesResp.body?.permissionSchemes ?: []
logger.info "Anzahl Berechtigungsschemata insgesamt: ${schemes.size()}"
// Map: schemeId -> [scheme: <object>, used: boolean, projects: [keys]]
def schemeUsage = schemes.collectEntries { scheme ->
    def id = (scheme.id ?: scheme["id"]) as Long
    [
        (id): [
            scheme : scheme,
            used : false,
            projects: []
        ]
    ]
}
// --- Step 2: fetch all projects & resolve each project's permission scheme ---
int startAt = 0
int maxResults = 50
int totalProjects = 0
boolean finished = false
while (!finished) {
    def projResp = get("/rest/api/3/project/search?startAt=${startAt}&maxResults=${maxResults}")
        .asObject(Map)
    if (projResp.status != 200) {
        logger.error("Konnte Projekte nicht laden (startAt=${startAt}): ${projResp.status} - ${projResp.body}")
        break
    }
    def body = projResp.body ?: [:]
    def projects = body.values ?: []
    totalProjects += projects.size()
    projects.each { proj ->
        def projectKey = proj.key
        def projectId = proj.id
        def permResp = get("/rest/api/3/project/${projectId}/permissionscheme")
            .asObject(Map)
        if (permResp.status == 200) {
            def schemeId = permResp.body?.id
            if (schemeId) {
                def idLong = (schemeId as Long)
                def entry = schemeUsage[idLong]
                if (entry) {
                    entry.used = true
                    entry.projects << projectKey
                } else {
                    logger.warn "Projekt ${projectKey} nutzt Berechtigungsschema ${schemeId}, das nicht in der globalen Liste war."
                }
            }
        } else if (permResp.status == 404) {
            // Team-managed projects -> have no classic permission scheme
        } else {
            logger.warn "Konnte Permission Scheme für Projekt ${projectKey} nicht laden: ${permResp.status}"
        }
    }
    int total = (body.total ?: totalProjects) as int
    startAt += maxResults
    if (startAt >= total) {
        finished = true
    }
}
// --- Step 3: determine unused (and not excluded) schemes ---------------------
def unused = schemeUsage.values()
    .findAll { entry ->
        def id = (entry.scheme.id ?: 0L) as Long
        !entry.used && !EXCLUDED_IDS.contains(id)
    }
    .sort { it.scheme.name?.toString()?.toLowerCase() }
logger.info "Projekte insgesamt : ${totalProjects}"
logger.info "Ausgeschlossene Schema-IDs : ${EXCLUDED_IDS.join(', ')}"
logger.info "Unbenutzte Schemata (Kandidaten): ${unused.size()}"
// --- Step 4: optionally delete -----------------------------------------------
def deleted = []
def failed = []
if (!DRY_RUN) {
    unused.each { entry ->
        def s = entry.scheme
        def id = (s.id as Long)
        logger.info "Lösche Berechtigungsschema ID=${id}, Name=\"${s.name}\" ..."
        def delResp = delete("/rest/api/3/permissionscheme/${id}")
            .asString()
        if (delResp.status in [200, 204]) {
            logger.info "Erfolgreich gelöscht: ID=${id}, Name=\"${s.name}\""
            deleted << [
                id : id,
                name : s.name,
                description: s.description
            ]
        } else {
            logger.warn "Löschen fehlgeschlagen für ID=${id}, Name=\"${s.name}\": Status=${delResp.status}, Body=${delResp.body}"
            failed << [
                id : id,
                name : s.name,
                status : delResp.status,
                body : delResp.body
            ]
        }
    }
} else {
    logger.info "DRY_RUN = true -> Es wird nichts gelöscht, nur Kandidaten ermittelt."
}
// --- Step 5: return a summary -------------------------------------------------
def lines = []
lines << "=== Berechtigungsschemata Housekeeping ==="
lines << "DRY_RUN : ${DRY_RUN}"
lines << "Gesamt-Schemata : ${schemes.size()}"
lines << "Gesamt-Projekte : ${totalProjects}"
lines << "Ausgeschlossene IDs : ${EXCLUDED_IDS.join(', ')}"
lines << "Kandidaten (unused) : ${unused.size()}"
if (!DRY_RUN) {
    lines << "Gelöscht : ${deleted.size()}"
    lines << "Fehlgeschlagen : ${failed.size()}"
}
lines << ""
lines << "Kandidaten (unbenutzte Schemas, exkl. EXCLUDED_IDS):"
unused.each { entry ->
    def s = entry.scheme
    lines << String.format(
        "- ID=%s | Name=\"%s\" | Beschreibung=\"%s\" | Projekte=%s",
        s.id,
        s.name ?: "",
        (s.description ?: "").replaceAll('\\s+', ' ').trim(),
        entry.projects ?: []
    )
}
def result = [
    summary : [
        dryRun : DRY_RUN,
        totalSchemes : schemes.size(),
        totalProjects : totalProjects,
        excludedIDs : EXCLUDED_IDS,
        candidateUnused : unused.size(),
        deleted : deleted.size(),
        failed : failed.size()
    ],
    deletedPermissionSchemes: deleted,
    failedDeletions : failed,
    candidateUnusedSchemes : unused.collect { e ->
        def s = e.scheme
        [
            id : s.id,
            name : s.name,
            description : s.description,
            projectsUsing: e.projects
        ]
    }
]
logger.info lines.join("\n")
return lines.join("\n") + "\n\nJSON:\n" + JsonOutput.prettyPrint(JsonOutput.toJson(result))

View File

@ -0,0 +1,62 @@
import utils.LinkedIssueTransitions
import utils.FieldCopy
// --- Configuration: link direction, target project, and the ADF field to copy ---
final String LINK_TYPE_NAME = "is cloned by"
final String TARGET_PROJECT_KEY = "CSD"
final String SOURCE_FIELD_ID = "customfield_11501" // ADF
final String TARGET_FIELD_ID = "customfield_11501"
final String LOG_PREFIX = "[CopyField->LinkedIssue]"
// Guard: `issue` comes from the workflow context of the post function.
def sourceKey = issue?.key?.toString()
if (!sourceKey) {
    logger.warn("${LOG_PREFIX} Kein issue.key im Kontext. Abbruch.")
    return
}
// Load the source issue: links + the field to copy (keeps the payload small)
def issueResp = get("/rest/api/3/issue/${sourceKey}")
    .queryString("fields", "issuelinks,${SOURCE_FIELD_ID}")
    .asObject(Map)
if (issueResp.status != 200) {
    logger.warn("${LOG_PREFIX} Konnte ${sourceKey} nicht laden (${issueResp.status}). Body=${issueResp.body}")
    return
}
def sourceJson = issueResp.body
// Resolve the target: exactly one link of the given type into the target project
def targetKey = LinkedIssueTransitions.findSingleLinkedTargetKey(
    sourceJson,
    LINK_TYPE_NAME,
    TARGET_PROJECT_KEY
)
if (!targetKey) {
    logger.warn("${LOG_PREFIX} Kein eindeutiges Ziel-Ticket gefunden (erwartet genau 1 Link ins Projekt ${TARGET_PROJECT_KEY}). Abbruch.")
    return
}
// Read the field value (ADF is passed through 1:1 as a Map)
def value = FieldCopy.getFieldValue(sourceJson, SOURCE_FIELD_ID)
// If empty: leave the target field unchanged
if (value == null) {
    logger.warn("${LOG_PREFIX} Source-Feld ${SOURCE_FIELD_ID} ist null/leer in ${sourceKey}. Zielfeld bleibt unverändert.")
    return
}
logger.info("${LOG_PREFIX} Kopiere ${SOURCE_FIELD_ID} von ${sourceKey} nach ${targetKey}")
def body = FieldCopy.buildSingleFieldUpdateBody(TARGET_FIELD_ID, value)
def putResp = put("/rest/api/3/issue/${targetKey}")
    .header("Content-Type", "application/json")
    .body(body)
    .asObject(Map)
// Jira returns 204 (No Content) on a successful field update
if (putResp.status == 204) {
    logger.info("${LOG_PREFIX} OK: Feld ${TARGET_FIELD_ID} in ${targetKey} aktualisiert.")
} else {
    logger.warn("${LOG_PREFIX} Feld-Update fehlgeschlagen: status=${putResp.status}, body=${putResp.body}")
}

View File

@ -0,0 +1,122 @@
/**
 * -----------------------------------------------------------------------------
 * Workflow post function (ScriptRunner for Jira Cloud)
 * -----------------------------------------------------------------------------
 *
 * Name: [CoE] Transition linked CSD ticket on close
 *
 * Purpose
 * -------
 * When this transition runs on the CoE ticket, a uniquely linked ticket in the
 * target project (e.g. CSD-xxxx) is automatically moved onward via a
 * transition (e.g. to status "Back from CoE").
 *
 * Process assumptions
 * -------------------
 * - There is exactly ONE link from the CoE ticket to a ticket in the target project.
 * - The link type (name) is known, e.g. "is cloned by".
 *
 * Technical approach
 * ------------------
 * - HTTP calls (get/post) stay in the workflow context because ScriptRunner
 *   Cloud provides those helpers reliably there.
 * - Resolving the target ticket is delegated to a Script Manager utility:
 *   utils.LinkedIssueTransitions.findSingleLinkedTargetKey(...)
 *
 * Configuration
 * -------------
 * - LINK_TYPE_NAME: name of the link direction (inward or outward) as shown
 *   in Jira (e.g. "is cloned by").
 * - TRANSITION_ID: ID of the transition to execute on the target ticket.
 * - TARGET_PROJECT_KEY: project key of the target project (e.g. "CSD").
 *
 * Logging
 * -------
 * The script logs:
 * - start and configuration
 * - error states (no source key, HTTP errors, no unique target)
 * - success/failure of the transition on the target ticket
 *
 * -----------------------------------------------------------------------------
 */
import utils.LinkedIssueTransitions
// ------------------------- Configuration ------------------------------------
// Link type name (direction), e.g. "is cloned by"
final String LINK_TYPE_NAME = "is cloned by"
// Transition in the target project, e.g. "CoE erledigt" (ID = 441)
final String TRANSITION_ID = "441"
// Target project containing the linked ticket
final String TARGET_PROJECT_KEY = "CSD"
// Uniform log prefix (makes filtering the logs easier)
final String LOG_PREFIX = "[CoE->Linked Transition]"
// ------------------------- Guard: source issue key --------------------------
// `issue` comes from the workflow context of the post function.
def sourceKey = issue?.key?.toString()
if (!sourceKey) {
    logger.warn("${LOG_PREFIX} Kein issue.key im Kontext. Abbruch.")
    return
}
logger.info("${LOG_PREFIX} Start. Source=${sourceKey}, linkType='${LINK_TYPE_NAME}', transitionId=${TRANSITION_ID}, targetProject=${TARGET_PROJECT_KEY}")
// ------------------------- 1) Load the source issue -------------------------
// We need the issue links (issuelinks) because the linked tickets live there.
// Note: only the "issuelinks" field is requested to keep the payload small.
def issueResp = get("/rest/api/3/issue/${sourceKey}")
    .queryString("fields", "issuelinks")
    .asObject(Map)
// Jira REST: 200 = OK
if (issueResp.status != 200) {
    logger.warn("${LOG_PREFIX} Konnte ${sourceKey} nicht laden (${issueResp.status}). Body=${issueResp.body}")
    return
}
// ------------------------- 2) Resolve the target key (utility) --------------
// The utility encapsulates our rules:
// - filter by link type name (inward/outward)
// - filter by target project key prefix (e.g. "CSD-")
// - there must be EXACTLY one match, otherwise null.
def targetKey = LinkedIssueTransitions.findSingleLinkedTargetKey(
    issueResp.body,
    LINK_TYPE_NAME,
    TARGET_PROJECT_KEY
)
if (!targetKey) {
    logger.warn("${LOG_PREFIX} Kein eindeutiges Ziel-Ticket gefunden (erwartet genau 1 Link ins Projekt ${TARGET_PROJECT_KEY}). Abbruch.")
    return
}
logger.info("${LOG_PREFIX} Ziel-Ticket: ${targetKey}. Führe Transition aus…")
// ------------------------- 3) Execute the transition on the target ----------
// Jira REST transition endpoint:
//   POST /rest/api/3/issue/{issueIdOrKey}/transitions
//
// Body:
//   { "transition": { "id": "441" } }
//
// Success: typically 204 (No Content)
def transResp = post("/rest/api/3/issue/${targetKey}/transitions")
    .header("Content-Type", "application/json")
    .body([ transition: [ id: TRANSITION_ID ] ])
    .asObject(Map)
if (transResp.status == 204) {
    logger.info("${LOG_PREFIX} OK: ${targetKey} erfolgreich transitioniert (ID=${TRANSITION_ID}).")
} else {
    // Common failure causes:
    // - the run-as / add-on user lacks permission in the target project
    // - the transition ID does not match the target ticket's workflow/status
    // - the target ticket is in a status where the transition is not available
    logger.warn("${LOG_PREFIX} Transition fehlgeschlagen für ${targetKey}: status=${transResp.status}, body=${transResp.body}")
}

View File

@ -1,21 +1,3 @@
# Jira-Scripte
Scripte für Automationen und Workflows (pds)
## Repo vom Gitea auf den Server holen (clone)
git clone https://git.bartschatten.de/mfredrich/<repo>.git
## Änderungen anschauen
git status
## Änderungen committen + pushen
git add .
git commit -m "describe change"
git push
## Updates vom Repo holen
git pull

View File

@ -0,0 +1,193 @@
/*
 * ScriptRunner Cloud - Scheduled Job
 *
 * Purpose:
 * - Finds issues that have been "Resolved" for 48-72h and still have a due date
 * - Adds a comment (ADF) BEFORE closing (Closed issues cannot be commented)
 * - Then clears the due date
 * - Transitions the issue to the target status "Closed"
 *
 * Flow per issue:
 * 1) add comment
 * 2) clear due date
 * 3) transition -> Closed
 *
 * Notes:
 * - The transition ID is optional; the default is to resolve the transition
 *   via the target status (to.name).
 * - With mixed workflows, resolving by target status is usually more stable
 *   than hard-coded IDs.
 */
// ===================== Configuration =====================
final String JQL = 'project = "Customer Service Desk" AND status = Resolved AND duedate IS NOT EMPTY AND resolutiondate >= -72h AND resolutiondate <= -48h'
final int MAX_PER_PAGE = 50
// Optional: set a hard transition ID here (e.g. "331"); otherwise leave null.
final String TRANSITION_ID = null
// Target status name (preferred resolution strategy)
final String TARGET_STATUS_NAME = "Closed"
// Fallback via action names (if "to.name" does not help)
final List<String> ACTION_NAME_FALLBACKS = ["Schließen", "Close", "Closed"]
// Comment text (sent as ADF)
final String COMMENT_TEXT = """
Dieses Ticket wurde automatisch geschlossen, nachdem der Support die Lösung präsentiert hat und wir davon ausgehen, dass die Lösung korrekt ist.
Sollten weiterhin Fragen bestehen oder erneut Unterstützung benötigt werden, können Sie eine neue Anfrage stellen.
Ihr pds Support
""".trim()
final Map ADF_BODY = [
    type : "doc",
    version: 1,
    content: [[
        type : "paragraph",
        content: [[ type: "text", text: COMMENT_TEXT ]]
    ]]
]
// ========================================================
// --------------------- Helpers --------------------------
/**
 * Returns the matching transition ID for the issue.
 * Priority:
 * 1) hard TRANSITION_ID (if set)
 * 2) transition whose target status to.name == TARGET_STATUS_NAME
 * 3) transition whose action name is in ACTION_NAME_FALLBACKS
 */
def resolveTransitionId = { String issueKey ->
    if (TRANSITION_ID?.trim()) {
        return TRANSITION_ID.trim()
    }
    def resp = get("/rest/api/3/issue/${issueKey}/transitions").asObject(Map)
    if (resp.status != 200) {
        throw new IllegalStateException("Transitions nicht lesbar: HTTP ${resp.status} - ${resp.body}")
    }
    List transitions = (resp.body?.transitions as List) ?: []
    // 1) by target status (to.name)
    def byTargetStatus = transitions.find { t ->
        (t?.to?.name as String)?.equalsIgnoreCase(TARGET_STATUS_NAME)
    }
    if (byTargetStatus?.id) return byTargetStatus.id as String
    // 2) by action name (name)
    def byActionName = transitions.find { t ->
        ACTION_NAME_FALLBACKS.any { fn -> (t?.name as String)?.equalsIgnoreCase(fn) }
    }
    return byActionName?.id as String
}
/** Adds a comment (ADF); throws on any non-201 response. */
def addComment = { String issueKey, Map adf ->
    def resp = post("/rest/api/3/issue/${issueKey}/comment")
        .header("Content-Type", "application/json")
        .body([ body: adf ])
        .asObject(Map)
    if (resp.status != 201) {
        throw new IllegalStateException("Kommentar fehlgeschlagen: HTTP ${resp.status} - ${resp.body}")
    }
}
/** Clears the due date; throws on any non-204 response. */
def clearDueDate = { String issueKey ->
    def resp = put("/rest/api/3/issue/${issueKey}")
        .header("Content-Type", "application/json")
        .body([ fields: [ duedate: null ] ])
        .asString()
    if (resp.status != 204) {
        throw new IllegalStateException("DueDate leeren fehlgeschlagen: HTTP ${resp.status} - ${resp.body}")
    }
}
/** Executes the given transition; throws on any non-204 response. */
def doTransition = { String issueKey, String transitionId ->
    def resp = post("/rest/api/3/issue/${issueKey}/transitions")
        .header("Content-Type", "application/json")
        .body([ transition: [ id: transitionId ] ])
        .asString()
    if (resp.status != 204) {
        throw new IllegalStateException("Transition fehlgeschlagen (id=${transitionId}): HTTP ${resp.status} - ${resp.body}")
    }
}
/** Small logging helpers so the log output stays consistent */
def logOk = { String issueKey, String msg -> logger.info("[AUTO-CLOSE] ${issueKey}: ${msg}") }
def logWarn = { String issueKey, String msg -> logger.warn("[AUTO-CLOSE] ${issueKey}: ${msg}") }
def logErr = { String issueKey, String msg -> logger.error("[AUTO-CLOSE] ${issueKey}: ${msg}") }
// --------------------- Processing ------------------------
int processed = 0
int skipped = 0
int failed = 0
String nextPageToken = null
boolean isLast = false
logger.info("[AUTO-CLOSE] Job Start. JQL='${JQL}'")
// Page through the search results until Jira reports the last page.
while (!isLast) {
    def req = get("/rest/api/3/search/jql")
        .queryString("jql", JQL)
        .queryString("fields", "status,duedate") // key is always included
        .queryString("maxResults", MAX_PER_PAGE as String)
    if (nextPageToken) {
        req = req.queryString("nextPageToken", nextPageToken)
    }
    def searchResp = req.asObject(Map)
    if (searchResp.status != 200) {
        throw new IllegalStateException("Search-Fehler: HTTP ${searchResp.status} - ${searchResp.body}")
    }
    Map body = searchResp.body as Map
    List issues = (body?.issues as List) ?: []
    isLast = (body?.isLast == true)
    nextPageToken = body?.nextPageToken as String
    logger.info("[AUTO-CLOSE] Seite geladen: issues=${issues.size()}, isLast=${isLast}, nextPageToken=${nextPageToken}")
    issues.each { Map iss ->
        String key = iss["key"] as String
        try {
            // 1) comment BEFORE closing
            addComment(key, ADF_BODY)
            logOk(key, "Kommentar gesetzt.")
            // 2) clear due date
            clearDueDate(key)
            logOk(key, "DueDate geleert.")
            // 3) transition -> Closed
            String transitionId = resolveTransitionId(key)
            if (!transitionId) {
                skipped++
                logWarn(key, "Keine passende Transition nach '${TARGET_STATUS_NAME}' gefunden (Fallbacks=${ACTION_NAME_FALLBACKS}). Ticket bleibt Resolved.")
                return
            }
            doTransition(key, transitionId)
            logOk(key, "Transition ausgeführt (id=${transitionId}) -> '${TARGET_STATUS_NAME}'.")
            processed++
        } catch (Throwable t) {
            failed++
            logErr(key, "Fehler: ${t.class.simpleName}: ${t.message}")
        }
    }
}
logger.info("[AUTO-CLOSE] Job Ende. processed=${processed}, skipped=${skipped}, failed=${failed} (JQL='${JQL}')")

View File

@ -0,0 +1,201 @@
// ScriptRunner for Jira Cloud - Scheduled Job
// Auto-close issues in "Waiting for Customer" after X days inactivity.
// 1) JQL search (paging via nextPageToken)
// 2) If needed set customfield_11433 based on customfield_10039
// 3) Add comment (ADF) BEFORE closing (so customer gets notification)
// 4) Transition to "Closed" via fixed transition ID (411) and set Resolution "Keine Lösung" (10010)
final String projectNameOrKey = 'Customer Service Desk' // or project key "CSD"
final String waitingStatusName = 'Waiting for Customer'
final int inactivityDays = 2
final int maxPerPage = 50
final boolean dryRun = false
// Transition "Schließen" (close)
final String transitionIdClose = "411"
// Resolution: "Keine Lösung" (no solution)
final String resolutionIdNoSolution = "10010"
// Custom fields
final String cfChannel = "customfield_10039" // Single select: "Customer Service" | "Partner Support"
final String cfFlag = "customfield_11433" // Set to "Ja" or "Nein"
// Select values
final String channelCustomerService = "Customer Service"
final String channelPartnerSupport = "Partner Support"
// Comment template (issue key gets injected per issue)
final String commentTemplate =
"""Wir haben Ihre Supportanfrage geschlossen, da wir in den letzten %d Tagen keine Rückmeldung von Ihnen erhalten haben.
Sollten Sie weiterhin Unterstützung benötigen, erstellen Sie bitte eine neue Supportanfrage und verweisen Sie dabei gern auf die Anfragenummer %s."""
final String jql = "project = \"${projectNameOrKey}\" AND status = \"${waitingStatusName}\" AND resolution IS EMPTY AND updated < startOfDay(-${inactivityDays})"
// ## Test with a single ticket #####
//final String jql = "project = \"${projectNameOrKey}\" AND key in (CSD-2124)"
/** Wraps plain text in a minimal ADF document (one paragraph). */
Map buildAdfComment(String text) {
    [
        type: "doc",
        version: 1,
        content: [[
            type: "paragraph",
            content: [[type: "text", text: text]]
        ]]
    ]
}
logger.info("=== Auto-Close Job gestartet ===")
logger.info("JQL: ${jql}")
logger.info("Transition ID (Close): ${transitionIdClose}")
logger.info("Resolution ID (Keine Lösung): ${resolutionIdNoSolution}")
logger.info("DRY_RUN: ${dryRun}")
// Pagination / statistics state
String nextPageToken = null
int processed = 0
int closed = 0
int skipped = 0
int failed = 0
// Page through the JQL result set; the loop ends when no nextPageToken
// is returned or the search fails.
while (true) {
    def req = get("/rest/api/3/search/jql")
        .queryString("jql", jql)
        .queryString("maxResults", maxPerPage.toString())
        // Important: request cfChannel too, otherwise we cannot evaluate it
        .queryString("fields", "status,resolution,updated,${cfChannel}")
    if (nextPageToken) {
        req = req.queryString("nextPageToken", nextPageToken)
    }
    def searchResp = req.asObject(Map)
    if (searchResp.status != 200) {
        logger.error("JQL-Suche fehlgeschlagen: ${searchResp.status} - ${searchResp.body}")
        break
    }
    def issues = (searchResp.body?.issues ?: []) as List
    nextPageToken = searchResp.body?.nextPageToken as String
    logger.info("Seite: ${issues.size()} Issues, nextPageToken=${nextPageToken ?: 'none'}")
    if (!issues) break
    issues.each { i ->
        processed++
        String issueKey = i?.key
        String statusName = i?.fields?.status?.name
        def resolution = i?.fields?.resolution
        String updated = i?.fields?.updated
        if (!issueKey) {
            skipped++
            logger.warn("Issue ohne Key übersprungen: ${i}")
            return
        }
        // Defensive re-check: JQL already filters on status, but the index may lag.
        if (!statusName?.equalsIgnoreCase(waitingStatusName)) {
            skipped++
            logger.info("${issueKey}: Status '${statusName}' != '${waitingStatusName}' -> skip")
            return
        }
        /*
        if (resolution != null) {
            skipped++
            logger.info("${issueKey}: hat bereits Resolution -> skip")
            return
        }
        */
        logger.info("${issueKey}: Kandidat (updated=${updated})")
        if (dryRun) {
            logger.info("${issueKey}: DRY_RUN -> würde Feld setzen + kommentieren + schließen")
            return
        }
        // ========= Derive customfield_11433 ("Ja"/"Nein") from customfield_10039 =========
        def channelValueObj = i?.fields?."${cfChannel}" // i.fields.customfield_10039
        String channelValue = channelValueObj?.value?.toString()
        String flagValue = null
        if (channelValue?.equalsIgnoreCase(channelCustomerService)) {
            flagValue = "Ja"
        } else if (channelValue?.equalsIgnoreCase(channelPartnerSupport)) {
            flagValue = "Nein"
        } else {
            // Empty/unexpected value: set nothing, but log it
            logger.warn("${issueKey}: ${cfChannel} ist leer oder unerwartet ('${channelValue}'), setze ${cfFlag} nicht.")
        }
        if (flagValue != null) {
            def updateResp = put("/rest/api/3/issue/${issueKey}")
                .header("Content-Type", "application/json")
                .body([
                    fields: [
                        (cfFlag): [value: flagValue] // set single select properly
                    ]
                ])
                .asString()
            if (updateResp.status == 204) {
                logger.info("${issueKey}: ${cfFlag} gesetzt auf '${flagValue}' (basierend auf ${cfChannel}='${channelValue}')")
            } else {
                failed++
                logger.error("${issueKey}: Setzen von ${cfFlag} fehlgeschlagen: ${updateResp.status} - ${updateResp.body}")
                return
            }
        }
        // ========= Comment (before closing, so the customer is notified) =========
        String commentText = String.format(commentTemplate, inactivityDays, issueKey)
        def commentResp = post("/rest/api/3/issue/${issueKey}/comment")
            .header("Content-Type", "application/json")
            .body([body: buildAdfComment(commentText)])
            .asObject(Map)
        if (!(commentResp.status in [200, 201])) {
            failed++
            logger.error("${issueKey}: Kommentar fehlgeschlagen: ${commentResp.status} - ${commentResp.body}")
            return
        }
        // ========= Close via transition ID (without resolution in the payload) =========
        def closeResp = post("/rest/api/3/issue/${issueKey}/transitions")
            .header("Content-Type", "application/json")
            .body([transition: [id: transitionIdClose]])
            .asString()
        if (closeResp.status != 204) {
            failed++
            logger.error("${issueKey}: Schließen fehlgeschlagen: ${closeResp.status} - ${closeResp.body}")
            return
        }
        // BUGFIX: `closed` was never incremented, so the final summary always
        // reported Closed=0 even when issues were closed successfully.
        closed++
        logger.info("${issueKey}: erfolgreich geschlossen (transitionId=${transitionIdClose})")
        // ========= Set the resolution afterwards =========
        def resResp = put("/rest/api/3/issue/${issueKey}")
            .header("Content-Type", "application/json")
            .body([fields: [resolution: [id: resolutionIdNoSolution]]])
            .asString()
        if (resResp.status == 204) {
            logger.info("${issueKey}: Resolution gesetzt auf '${resolutionIdNoSolution}' (Keine Lösung)")
        } else {
            failed++
            logger.error("${issueKey}: Resolution setzen fehlgeschlagen: ${resResp.status} - ${resResp.body}")
            // The ticket is already closed, so we do NOT return here,
            // but we count it as failed for the statistics.
        }
    }
    if (!nextPageToken) break
}
logger.info("=== Auto-Close Job fertig ===")
logger.info("Processed=${processed}, Closed=${closed}, Skipped=${skipped}, Failed=${failed}")

View File

@ -0,0 +1,19 @@
import utils.AutoCloseJob
// Auto-close TS issues that have been "Gelöst" (resolved) for at least 14 days.
final String JQL = 'project = "TS" AND status = Gelöst AND resolutiondate <= startOfDay(-14d)'
// Optional: hard transition ID, or null to resolve via the target status
final String TRANSITION_ID = null
final String TARGET_STATUS_NAME = "Closed"
AutoCloseJob.run([
    logger : logger,
    JQL : JQL,
    TRANSITION_ID : TRANSITION_ID,
    TARGET_STATUS_NAME: TARGET_STATUS_NAME,
    MAX_PER_PAGE : 50
])
// invokes the job in the Script Manager: utils/AutoCloseJob.groovy

View File

@ -0,0 +1,149 @@
// ScriptRunner for Jira Cloud - Scheduled Job
// Auto-close issues in "Waiting for Customer" after X days inactivity.
// Steps:
// 1) JQL search (paging via nextPageToken)
// 2) Add comment (ADF) BEFORE closing (so customer gets notification)
// 3) Transition to "Closed" via transition ID
// -------------------- Configuration --------------------
final String projectKey = "TS"
final String waitingStatusName = "Waiting for Customer"
//final String waitingStatusName = "Wartet auf Kunden"
final int inactivityDays = 14
final int maxPerPage = 50
final boolean dryRun = false
// Transition "Schließen" (close)
final String transitionIdClose = "2"
// Comment template (issue key gets injected per issue)
final String commentTemplate =
"""Wir haben Ihre Supportanfrage geschlossen, da wir in den letzten %d Tagen keine Rückmeldung von Ihnen erhalten haben.
Sollten Sie weiterhin Unterstützung benötigen, erstellen Sie bitte eine neue Supportanfrage und verweisen Sie dabei gern auf die Anfragenummer %s."""
final String jql = "project = ${projectKey} AND status = '${waitingStatusName}' AND resolution IS EMPTY AND updated < startOfDay(-${inactivityDays})"
// ## Test with a single ticket #####
// final String jql = "project = ${projectKey} AND key = TS-35"
// -------------------- Helper --------------------
/** Wraps plain text into a minimal Atlassian Document Format (ADF) document. */
Map buildAdfComment(String text) {
    def textNode = [type: "text", text: text]
    def paragraph = [type: "paragraph", content: [textNode]]
    return [type: "doc", version: 1, content: [paragraph]]
}
/**
 * Posts the given text as an ADF comment on the issue.
 *
 * @return true on HTTP 200/201, false otherwise (failure is logged)
 */
boolean addComment(String issueKey, String commentText) {
    def response = post("/rest/api/3/issue/${issueKey}/comment")
        .header("Content-Type", "application/json")
        .body([body: buildAdfComment(commentText)])
        .asObject(Map)
    if (response.status in [200, 201]) {
        return true
    }
    logger.error("${issueKey}: Kommentar fehlgeschlagen: ${response.status} - ${response.body}")
    return false
}
/**
 * Executes the close transition on the issue.
 *
 * @return true on HTTP 204, false otherwise (failure is logged)
 */
boolean closeIssue(String issueKey, String transitionId) {
    def response = post("/rest/api/3/issue/${issueKey}/transitions")
        .header("Content-Type", "application/json")
        .body([transition: [id: transitionId]])
        .asString()
    if (response.status == 204) {
        return true
    }
    logger.error("${issueKey}: Schließen fehlgeschlagen (transitionId=${transitionId}): ${response.status} - ${response.body}")
    return false
}
// -------------------- Flow --------------------
logger.info("=== Auto-Close Job gestartet ===")
logger.info("JQL: ${jql}")
logger.info("Transition ID (Close): ${transitionIdClose}")
logger.info("DRY_RUN: ${dryRun}")
// Pagination / statistics state
String nextPageToken = null
int processed = 0
int closed = 0
int skipped = 0
int failed = 0
// Page through the JQL result set; stops when no nextPageToken is returned.
while (true) {
    def req = get("/rest/api/3/search/jql")
        .queryString("jql", jql)
        .queryString("maxResults", maxPerPage.toString())
        // Only the fields we actually need
        .queryString("fields", "status,resolution,updated")
    if (nextPageToken) {
        req = req.queryString("nextPageToken", nextPageToken)
    }
    def searchResp = req.asObject(Map)
    if (searchResp.status != 200) {
        logger.error("JQL-Suche fehlgeschlagen: ${searchResp.status} - ${searchResp.body}")
        break
    }
    def issues = (searchResp.body?.issues ?: []) as List
    nextPageToken = searchResp.body?.nextPageToken as String
    logger.info("Seite: ${issues.size()} Issues, nextPageToken=${nextPageToken ?: 'none'}")
    if (!issues) break
    issues.each { i ->
        processed++
        String issueKey = i?.key
        String statusName = i?.fields?.status?.name
        def resolution = i?.fields?.resolution
        String updated = i?.fields?.updated
        if (!issueKey) {
            skipped++
            logger.warn("Issue ohne Key übersprungen: ${i}")
            return
        }
        logger.info("${issueKey}: Kandidat (updated=${updated})")
        if (dryRun) {
            logger.info("${issueKey}: DRY_RUN -> würde kommentieren + schließen")
            return
        }
        // 1) comment before closing
        String commentText = String.format(commentTemplate, inactivityDays, issueKey)
        if (!addComment(issueKey, commentText)) {
            failed++
            return
        }
        // 2) close
        if (!closeIssue(issueKey, transitionIdClose)) {
            failed++
            return
        }
        closed++
        logger.info("${issueKey}: erfolgreich geschlossen (transitionId=${transitionIdClose})")
    }
    if (!nextPageToken) break
}
logger.info("=== Auto-Close Job fertig ===")
logger.info("Processed=${processed}, Closed=${closed}, Skipped=${skipped}, Failed=${failed}")

View File

@ -0,0 +1,128 @@
package utils
/**
 * -----------------------------------------------------------------------------
 * FieldCopy (utility)
 * -----------------------------------------------------------------------------
 *
 * Purpose
 * -------
 * - Extracts field values from an issue JSON (Map) as delivered by the Jira REST API
 * - Builds update payloads for PUT /rest/api/3/issue/{key}
 * - Supports multiple fields via a mapping list
 *
 * IMPORTANT
 * ---------
 * - No HTTP in here (no get/put/post). Pure logic only.
 * - ADF (rich text / paragraph) is passed through 1:1 as a Map.
 *
 * Mapping format
 * --------------
 * List of Maps:
 *   [
 *     [source: "customfield_11501", target: "customfield_11501", allowNull: false],
 *     [source: "customfield_12345", target: "customfield_99999", allowNull: true ]
 *   ]
 *
 * allowNull=false: null is NOT written (target field stays unchanged)
 * allowNull=true : null is written (target field is cleared)
 * -----------------------------------------------------------------------------
 */
class FieldCopy {
    /**
     * Reads the raw value of a field from the issue JSON.
     *
     * @param issueJson issue JSON as returned by Jira REST (may be null)
     * @param fieldId   field id, e.g. "customfield_11501"
     * @return the raw value, or null when the issue/fields/field is missing
     */
    static Object getFieldValue(Map issueJson, String fieldId) {
        if (issueJson == null) return null
        def fields = issueJson.get("fields")
        if (!(fields instanceof Map)) return null
        return (fields as Map).get(fieldId)
    }
    /**
     * Builds an update body for a single field.
     *
     * @return Map of the form: [fields: [(fieldId): value]]
     */
    static Map buildSingleFieldUpdateBody(String fieldId, Object value) {
        Map fieldsPayload = [:]
        fieldsPayload.put(fieldId, value)
        Map body = [:]
        body.put("fields", fieldsPayload)
        return body
    }
    /**
     * Builds an update body for multiple fields based on the mapping list.
     *
     * @return Map {fields:{...}} or null when nothing should be written
     */
    static Map buildMultiFieldUpdateBody(Map sourceJson, List fieldMappings) {
        if (sourceJson == null) return null
        if (fieldMappings == null || fieldMappings.isEmpty()) return null
        Map fieldsPayload = [:]
        for (def m : fieldMappings) {
            if (!(m instanceof Map)) {
                continue
            }
            String sourceField = (m.get("source") ?: "").toString()
            if (!sourceField) {
                continue
            }
            // Target defaults to the source field when no explicit target is mapped
            String targetField = m.containsKey("target") && m.get("target") != null
                ? m.get("target").toString()
                : sourceField
            boolean allowNull = false
            if (m.containsKey("allowNull") && m.get("allowNull") != null) {
                allowNull = (m.get("allowNull") as Boolean)
            }
            Object value = getFieldValue(sourceJson, sourceField)
            // only write null when explicitly allowed
            if (value == null && !allowNull) {
                continue
            }
            fieldsPayload.put(targetField, value)
        }
        if (fieldsPayload.isEmpty()) {
            return null
        }
        Map body = [:]
        body.put("fields", fieldsPayload)
        return body
    }
    /**
     * Helper: returns the source field list (for the Jira "fields" query string)
     * as a comma-separated sequence: "customfield_1,customfield_2"
     */
    static String buildSourceFieldQuery(List fieldMappings) {
        if (fieldMappings == null || fieldMappings.isEmpty()) return ""
        List result = []
        for (def m : fieldMappings) {
            if (!(m instanceof Map)) continue
            def s = m.get("source")
            if (s == null) continue
            String sourceField = s.toString()
            if (!sourceField) continue
            if (!result.contains(sourceField)) {
                result.add(sourceField)
            }
        }
        return result.join(",")
    }
}

View File

@ -0,0 +1,33 @@
package utils
class LinkedIssueTransitions {
    /**
     * Scans the issue's links for links of the given type (matched against the
     * inward OR outward direction name) whose linked issue belongs to the
     * target project, and returns that issue's key if exactly one distinct
     * match exists.
     *
     * @param issueJson        issue JSON containing fields.issuelinks
     * @param linkTypeName     link direction name, e.g. "is cloned by"
     * @param targetProjectKey project key the linked issue must belong to
     * @return the unique linked issue key, or null when zero or several match
     */
    static String findSingleLinkedTargetKey(Map issueJson,
                                            String linkTypeName,
                                            String targetProjectKey) {
        List links = (issueJson?.fields?.issuelinks ?: []) as List
        if (!links) {
            return null
        }
        String wantedPrefix = "${targetProjectKey}-"
        // LinkedHashSet dedupes while preserving first-seen order,
        // matching the original list + unique() behaviour.
        Set<String> matches = [] as LinkedHashSet
        links.each { link ->
            def pairs = [
                [link?.type?.inward, link?.inwardIssue],
                [link?.type?.outward, link?.outwardIssue]
            ]
            pairs.each { pair ->
                def (dirName, linkedIssue) = pair
                if (dirName?.toString() == linkTypeName && linkedIssue?.key) {
                    String candidate = linkedIssue.key.toString()
                    if (candidate.startsWith(wantedPrefix)) {
                        matches << candidate
                    }
                }
            }
        }
        return matches.size() == 1 ? matches.first() : null
    }
}
}