// NOTE: Slack paste artifacts (author "Fred Friis", timestamp 03/21/2024 7:04 PM) removed from this snippet.
/**
 * Deletes the given entities from DynamoDB in batches of [DEFAULT_WRITE_BATCH_SIZE],
 * retrying unprocessed keys with exponential backoff.
 *
 * @param multiLocationIds entities to delete; their keys are derived via [table]'s key extraction.
 * @throws IOException if a chunk still has unprocessed keys after
 *   [DEFAULT_WRITE_BATCH_MAX_ATTEMPTS] attempts.
 */
fun batchDelete(multiLocationIds: Set<ProductClassCategoryEntity>) {
    val entityClassName = table.tableSchema().itemType().rawClass()
    multiLocationIds.chunked(DEFAULT_WRITE_BATCH_SIZE).forEach { chunk ->
        var attemptCount = 0
        var exponentialBackoffMultiplier = 1
        var keysToDelete = chunk.map(table::keyFrom)
        // Loop until DynamoDB reports no unprocessed delete keys for this chunk.
        while (keysToDelete.isNotEmpty()) {
            if (attemptCount >= DEFAULT_WRITE_BATCH_MAX_ATTEMPTS) {
                // TODO the other batch methods return results (including fails) as objects but delete throws exception - should be harmonized (I prefer objects to exceptions)
                throw IOException("Failed to batch delete $entityClassName after $DEFAULT_WRITE_BATCH_MAX_ATTEMPTS attempts: ${keysToDelete.prettyPrint()}")
            }
            // Backoff scales with how many keys remain and doubles per attempt.
            val rateLimitWaitMs = DEFAULT_WRITE_BATCH_WAIT_PER_ITEM_MS * keysToDelete.size * exponentialBackoffMultiplier
            logger.info { "Batch deleting $entityClassName with rate limit wait of $rateLimitWaitMs ms: ${keysToDelete.prettyPrint()}" }
            keysToDelete = internalBatchDelete(keysToDelete)
            attemptCount += 1
            exponentialBackoffMultiplier *= 2
            // Only pause when a retry is actually coming; sleeping after the final
            // successful attempt would just delay the caller for no benefit.
            if (keysToDelete.isNotEmpty()) {
                Thread.sleep(rateLimitWaitMs)
            }
        }
    }
}
/**
 * Issues one batch-write request deleting [keys] from [table] and returns the keys
 * DynamoDB reported as unprocessed (i.e. the ones the caller should retry).
 */
private fun internalBatchDelete(keys: List<Key>): List<Key> {
    // Assemble a delete-only write batch for this table (builder mutation is
    // unavoidable with the enhanced-client API).
    val deleteBatch = WriteBatch
        .builder(table.tableSchema().itemType().rawClass())
        .mappedTableResource(table)
        .apply { keys.forEach { addDeleteItem(it) } }
        .build()
    val request = BatchWriteItemEnhancedRequest
        .builder()
        .addWriteBatch(deleteBatch)
        .build()
    // Execute and surface whatever DynamoDB left unprocessed.
    return enhancedClient.batchWriteItem(request).unprocessedDeleteItemsForTable(table)
}