From 5968a41c10c68c634b71222772c1a4b07024d45e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tim=20D=C3=BCsterhus?=
Date: Tue, 4 Jun 2024 12:02:15 +0200
Subject: [PATCH] Replace `ZEND_ASSUME()` by `ZEND_ASSERT()` in `zend_hash_*_ptr` setters
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

I had a case where I was intentionally storing a `NULL` pointer within a
HashTable to mark an entry as “blocked”, without paying for the overhead of
checking the entry type when reading the pointer, resulting in semantics
that are similar to using `isset()` instead of `array_key_exists()` in
userland.

This worked fine in unoptimized test builds, but due to the `ZEND_ASSUME()`
in the `zend_hash_find_ptr` functions, the optimized release builds turned
the logic of:

    my_pointer = zend_hash_find_ptr(ht, key);
    if (my_pointer == NULL) {
        return;
    }

    *my_pointer;

into

    zv = zend_hash_find(ht, key);
    if (zv) {
        *Z_PTR_P(zv);
    } else {
        return;
    }

thus introducing a hard-to-debug and compiler-dependent crash when the
entry exists, but the stored pointer is `NULL`.

Change the `ZEND_ASSUME()` in the setters to `ZEND_ASSERT()`. This would
have made my mistake immediately obvious in debug builds when storing the
pointer.

The getters still use `ZEND_ASSUME()` under the assumption that they are
called much more often, reducing the impact on debug builds: assuming the
developer uses the `_ptr` variants for both reading and writing the
entries, the mistake will be reliably caught during writing, making the
assert during reading unnecessary.

For release builds the `ZEND_ASSERT()` will be equivalent to
`ZEND_ASSUME()`, avoiding any performance impact for those.
---
 Zend/zend_hash.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/Zend/zend_hash.h b/Zend/zend_hash.h
index 89885557b044f..d4385d162a0fc 100644
--- a/Zend/zend_hash.h
+++ b/Zend/zend_hash.h
@@ -646,7 +646,7 @@ static zend_always_inline void *zend_hash_add_ptr(HashTable *ht, zend_string *ke
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_add(ht, key, &tmp);
 	if (zv) {
-		ZEND_ASSUME(Z_PTR_P(zv));
+		ZEND_ASSERT(Z_PTR_P(zv));
 		return Z_PTR_P(zv);
 	} else {
 		return NULL;
@@ -660,7 +660,7 @@ static zend_always_inline void *zend_hash_add_new_ptr(HashTable *ht, zend_string
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_add_new(ht, key, &tmp);
 	if (zv) {
-		ZEND_ASSUME(Z_PTR_P(zv));
+		ZEND_ASSERT(Z_PTR_P(zv));
 		return Z_PTR_P(zv);
 	} else {
 		return NULL;
@@ -674,7 +674,7 @@ static zend_always_inline void *zend_hash_str_add_ptr(HashTable *ht, const char
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_str_add(ht, str, len, &tmp);
 	if (zv) {
-		ZEND_ASSUME(Z_PTR_P(zv));
+		ZEND_ASSERT(Z_PTR_P(zv));
 		return Z_PTR_P(zv);
 	} else {
 		return NULL;
@@ -688,7 +688,7 @@ static zend_always_inline void *zend_hash_str_add_new_ptr(HashTable *ht, const c
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_str_add_new(ht, str, len, &tmp);
 	if (zv) {
-		ZEND_ASSUME(Z_PTR_P(zv));
+		ZEND_ASSERT(Z_PTR_P(zv));
 		return Z_PTR_P(zv);
 	} else {
 		return NULL;
@@ -701,7 +701,7 @@ static zend_always_inline void *zend_hash_update_ptr(HashTable *ht, zend_string
 
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_update(ht, key, &tmp);
-	ZEND_ASSUME(Z_PTR_P(zv));
+	ZEND_ASSERT(Z_PTR_P(zv));
 	return Z_PTR_P(zv);
 }
 
@@ -711,7 +711,7 @@ static zend_always_inline void *zend_hash_str_update_ptr(HashTable *ht, const ch
 
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_str_update(ht, str, len, &tmp);
-	ZEND_ASSUME(Z_PTR_P(zv));
+	ZEND_ASSERT(Z_PTR_P(zv));
 	return Z_PTR_P(zv);
 }
 
@@ -809,7 +809,7 @@ static zend_always_inline void *zend_hash_index_update_ptr(HashTable *ht, zend_u
 
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_index_update(ht, h, &tmp);
-	ZEND_ASSUME(Z_PTR_P(zv));
+	ZEND_ASSERT(Z_PTR_P(zv));
 	return Z_PTR_P(zv);
 }
 
@@ -833,7 +833,7 @@ static zend_always_inline void *zend_hash_next_index_insert_ptr(HashTable *ht, v
 	ZVAL_PTR(&tmp, pData);
 	zv = zend_hash_next_index_insert(ht, &tmp);
 	if (zv) {
-		ZEND_ASSUME(Z_PTR_P(zv));
+		ZEND_ASSERT(Z_PTR_P(zv));
 		return Z_PTR_P(zv);
 	} else {
 		return NULL;
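
Note (illustrative sketch, not part of the patch): the snippet below shows
the misuse pattern the commit message describes and how the changed setter
surfaces it. The helpers `mark_blocked()`/`is_blocked()` and the include are
assumptions for illustration only; only the `zend_hash_*` calls are real
Zend API.

    #include "zend_hash.h"

    /* Hypothetical extension helpers -- names are illustrative. */
    static void mark_blocked(HashTable *ht, const char *name, size_t len)
    {
        /* Storing a NULL payload to get isset()-like semantics. With this
         * patch, the ZEND_ASSERT(Z_PTR_P(zv)) inside zend_hash_str_update_ptr()
         * fails loudly in debug builds, instead of ZEND_ASSUME() silently
         * handing the optimizer a false invariant in release builds. */
        zend_hash_str_update_ptr(ht, name, len, NULL);
    }

    static bool is_blocked(const HashTable *ht, const char *name, size_t len)
    {
        /* Checking key existence directly avoids relying on the pointer
         * returned by the _find_ptr() getters, which still use ZEND_ASSUME(). */
        return zend_hash_str_exists(ht, name, len);
    }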