Make the TTL-based cleaning more aggressive

It was discovered that the TTL-based cleaning could build up
a significant backlog of rdataset headers during periods when
the top of the TTL heap isn't expired yet.  Make the TTL-based cleaning
more aggressive by cleaning more headers from the heap when we are
adding a new header into the RBTDB.

(cherry picked from commit d8220ca4ca)
This commit is contained in:
Ondřej Surý 2024-02-20 08:50:58 +01:00
parent 756555dbcf
commit b4d9f1cbab
No known key found for this signature in database
GPG key ID: 2820F37E873DEA41

View file

@ -355,6 +355,14 @@ hash_32(uint32_t val, unsigned int bits) {
#define DEFAULT_CACHE_NODE_LOCK_COUNT 17
#endif /* DNS_RBTDB_CACHE_NODE_LOCK_COUNT */
/*
 * This defines the number of headers that we try to expire each time
 * expire_ttl_headers() is run.  The number should be small enough that
 * TTL-based header expiration doesn't take too long, but large enough
 * that we expire a sufficient number of headers when their TTLs are
 * clustered.
 */
#define DNS_RBTDB_EXPIRE_TTL_COUNT 10
typedef struct {
nodelock_t lock;
/* Protected in the refcount routines. */
@ -6867,6 +6875,10 @@ rdataset_size(rdatasetheader_t *header) {
return (sizeof(*header));
}
static void
expire_ttl_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, bool tree_locked,
isc_stdtime_t now);
static isc_result_t
addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options,
@ -6876,7 +6888,6 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
rbtdb_version_t *rbtversion = version;
isc_region_t region;
rdatasetheader_t *newheader;
rdatasetheader_t *header;
isc_result_t result;
bool delegating;
bool newnsec;
@ -7049,20 +7060,7 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
cleanup_dead_nodes(rbtdb, rbtnode->locknum);
}
header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1);
if (header != NULL) {
dns_ttl_t rdh_ttl = header->rdh_ttl;
/* Only account for stale TTL if cache is not overmem */
if (!cache_is_overmem) {
rdh_ttl += STALE_TTL(header, rbtdb);
}
if (rdh_ttl < now - RBTDB_VIRTUAL) {
expire_header(rbtdb, header, tree_locked,
expire_ttl);
}
}
expire_ttl_headers(rbtdb, rbtnode->locknum, tree_locked, now);
/*
* If we've been holding a write lock on the tree just for
@ -10435,3 +10433,40 @@ expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header, bool tree_locked,
}
}
}
/*
 * Expire up to DNS_RBTDB_EXPIRE_TTL_COUNT headers from the top of the
 * per-lock TTL heap whose (possibly stale-extended) TTL has passed.
 *
 * Caller must be holding the node write lock.
 */
static void
expire_ttl_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, bool tree_locked,
		   isc_stdtime_t now) {
	isc_heap_t *heap = rbtdb->heaps[locknum];
	size_t expired = 0;

	while (expired++ < DNS_RBTDB_EXPIRE_TTL_COUNT) {
		rdatasetheader_t *top = isc_heap_element(heap, 1);

		if (top == NULL) {
			/* The TTL heap is empty; nothing left to clean. */
			return;
		}

		dns_ttl_t ttl = top->rdh_ttl;

		if (!isc_mem_isovermem(rbtdb->common.mctx)) {
			/* Only account for stale TTL if cache is not overmem */
			ttl += STALE_TTL(top, rbtdb);
		}

		if (ttl >= now - RBTDB_VIRTUAL) {
			/*
			 * The heap is ordered by TTL, so if the top entry
			 * isn't eligible for expiry yet, no other entry on
			 * this heap can be either; stop cleaning.
			 */
			return;
		}

		expire_header(rbtdb, top, tree_locked, expire_ttl);
	}
}