KiCad PCB EDA Suite
Loading...
Searching...
No Matches
rtree_node.h
Go to the documentation of this file.
1/*
2 * This program source code file is part of KiCad, a free EDA CAD application.
3 *
4 * Copyright The KiCad Developers, see AUTHORS.txt for contributors.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 3
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you may find one here:
18 * http://www.gnu.org/licenses/old-licenses/gpl-3.0.html
19 * or you may search the http://www.gnu.org website for the version 3 license,
20 * or you may write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
22 */
23
24#ifndef RTREE_NODE_H
25#define RTREE_NODE_H
26
#include <algorithm>
#include <atomic>
#include <bitset>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <type_traits>
#include <vector>
35
36#if defined( __SSE2__ )
37#include <emmintrin.h>
38#elif defined( __ARM_NEON ) || defined( __ARM_NEON__ )
39#include <arm_neon.h>
40#endif
41
42namespace KIRTREE
43{
44
57inline uint64_t HilbertXY2D( int aOrder, uint32_t aX, uint32_t aY )
58{
59 uint64_t d = 0;
60 uint32_t x = aX;
61 uint32_t y = aY;
62
63 for( uint32_t s = ( 1U << ( aOrder - 1 ) ); s > 0; s >>= 1 )
64 {
65 uint32_t rx = ( x & s ) ? 1 : 0;
66 uint32_t ry = ( y & s ) ? 1 : 0;
67
68 d += static_cast<uint64_t>( s ) * s * ( ( 3 * rx ) ^ ry );
69
70 // Rotate quadrant
71 if( ry == 0 )
72 {
73 if( rx == 1 )
74 {
75 x = s * 2 - 1 - x;
76 y = s * 2 - 1 - y;
77 }
78
79 uint32_t tmp = x;
80 x = y;
81 y = tmp;
82 }
83 }
84
85 return d;
86}
87
88
100template <int NUMDIMS>
101inline uint64_t HilbertND2D( int aOrder, const uint32_t aCoords[NUMDIMS] )
102{
103 if constexpr( NUMDIMS == 2 )
104 {
105 return HilbertXY2D( aOrder, aCoords[0], aCoords[1] );
106 }
107 else
108 {
109 // For 3+ dimensions, fall back to Z-order (Morton) curve interleaving.
110 // This gives good-enough clustering without the complexity of generalized
111 // Hilbert in N dimensions. Each coordinate contributes aOrder bits, interleaved.
112 uint64_t d = 0;
113
114 for( int bit = aOrder - 1; bit >= 0; --bit )
115 {
116 for( int dim = NUMDIMS - 1; dim >= 0; --dim )
117 {
118 d <<= 1;
119 d |= ( aCoords[dim] >> bit ) & 1;
120 }
121 }
122
123 return d;
124 }
125}
126
127
/**
 * Compute a bitmask of which child slots overlap a 2D query rectangle.
 *
 * Bounds are stored struct-of-arrays: four parallel arrays holding each
 * slot's min-x, max-x, min-y and max-y.  Slot i overlaps the query iff no
 * separating axis exists, i.e. min <= queryMax and max >= queryMin on both
 * axes.
 *
 * @param aBoundsMin0 per-slot minimum on axis 0 (FANOUT entries readable)
 * @param aBoundsMax0 per-slot maximum on axis 0
 * @param aBoundsMin1 per-slot minimum on axis 1
 * @param aBoundsMax1 per-slot maximum on axis 1
 * @param aCount      number of valid slots; bits >= aCount are cleared
 * @param aMin        query rectangle minimum corner { x, y }
 * @param aMax        query rectangle maximum corner { x, y }
 * @return bit i set iff slot i (i < aCount) overlaps the query rectangle
 */
template <class ELEMTYPE, int FANOUT>
inline uint32_t OverlapMask2D( const ELEMTYPE* aBoundsMin0, const ELEMTYPE* aBoundsMax0,
                               const ELEMTYPE* aBoundsMin1, const ELEMTYPE* aBoundsMax1,
                               int aCount,
                               const ELEMTYPE aMin[2], const ELEMTYPE aMax[2] )
{
    static_assert( FANOUT <= 31, "OverlapMask2D requires FANOUT <= 31" );
    static_assert( FANOUT % 4 == 0, "OverlapMask2D requires FANOUT divisible by 4 for SIMD" );

    // The SIMD fast paths below compare with *signed 32-bit integer*
    // instructions (_mm_cmpgt_epi32 / vcgtq_s32).  Gating them on
    // sizeof( ELEMTYPE ) == 4 alone would wrongly admit float (bit patterns
    // loaded and compared as integers, and the query corner truncated by
    // static_cast<int>) and uint32_t (values >= 2^31 compare as negative).
    // Require a signed 32-bit integral type; everything else takes the
    // scalar path, whose comparisons use ELEMTYPE's own semantics.
    [[maybe_unused]] constexpr bool useSignedSimd32 =
            std::is_integral_v<ELEMTYPE> && std::is_signed_v<ELEMTYPE>
                    && sizeof( ELEMTYPE ) == 4;

#if defined( __SSE2__ )
    if constexpr( useSignedSimd32 )
    {
        // Broadcast query bounds to all 4 SIMD lanes
        __m128i qmin0 = _mm_set1_epi32( static_cast<int>( aMin[0] ) );
        __m128i qmax0 = _mm_set1_epi32( static_cast<int>( aMax[0] ) );
        __m128i qmin1 = _mm_set1_epi32( static_cast<int>( aMin[1] ) );
        __m128i qmax1 = _mm_set1_epi32( static_cast<int>( aMax[1] ) );

        uint32_t mask = 0;

        // Test 4 children per iteration using 128-bit packed integer comparisons.
        // Each iteration loads one 16-byte chunk from each of the 4 bounds arrays,
        // tests all 4 separation axes, and extracts the result to 4 mask bits via
        // movemask (single instruction to extract the top bit of each 32-bit lane).
        for( int j = 0; j < FANOUT; j += 4 )
        {
            __m128i bmin0 = _mm_loadu_si128(
                    reinterpret_cast<const __m128i*>( &aBoundsMin0[j] ) );
            __m128i bmax0 = _mm_loadu_si128(
                    reinterpret_cast<const __m128i*>( &aBoundsMax0[j] ) );
            __m128i bmin1 = _mm_loadu_si128(
                    reinterpret_cast<const __m128i*>( &aBoundsMin1[j] ) );
            __m128i bmax1 = _mm_loadu_si128(
                    reinterpret_cast<const __m128i*>( &aBoundsMax1[j] ) );

            // Any separation axis proves non-overlap
            __m128i fail = _mm_or_si128(
                    _mm_or_si128( _mm_cmpgt_epi32( bmin0, qmax0 ),
                                  _mm_cmpgt_epi32( qmin0, bmax0 ) ),
                    _mm_or_si128( _mm_cmpgt_epi32( bmin1, qmax1 ),
                                  _mm_cmpgt_epi32( qmin1, bmax1 ) ) );

            // Extract top bit of each 32-bit lane into a 4-bit integer
            int failBits = _mm_movemask_ps( _mm_castsi128_ps( fail ) );
            mask |= static_cast<uint32_t>( ~failBits & 0xF ) << j;
        }

        return mask & ( ( 1U << aCount ) - 1 );
    }
    else
#elif defined( __ARM_NEON ) || defined( __ARM_NEON__ )
    if constexpr( useSignedSimd32 )
    {
        // Broadcast query bounds to all 4 NEON lanes
        int32x4_t qmin0 = vdupq_n_s32( static_cast<int32_t>( aMin[0] ) );
        int32x4_t qmax0 = vdupq_n_s32( static_cast<int32_t>( aMax[0] ) );
        int32x4_t qmin1 = vdupq_n_s32( static_cast<int32_t>( aMin[1] ) );
        int32x4_t qmax1 = vdupq_n_s32( static_cast<int32_t>( aMax[1] ) );

        // Bit-position shifts for converting per-lane fail bits into a bitmask.
        // Lane 0 stays at bit 0, lane 1 shifts left by 1, etc.
        static const int32_t kBitShifts[4] = { 0, 1, 2, 3 };
        int32x4_t shifts = vld1q_s32( kBitShifts );

        uint32_t mask = 0;

        // Test 4 children per iteration using 128-bit NEON comparisons.
        // NEON lacks movemask, so we extract each lane's 1-bit result via
        // right-shift-31, position it with a variable left shift, and
        // OR-reduce the 4 lanes to a 4-bit chunk.
        for( int j = 0; j < FANOUT; j += 4 )
        {
            int32x4_t bmin0 = vld1q_s32(
                    reinterpret_cast<const int32_t*>( &aBoundsMin0[j] ) );
            int32x4_t bmax0 = vld1q_s32(
                    reinterpret_cast<const int32_t*>( &aBoundsMax0[j] ) );
            int32x4_t bmin1 = vld1q_s32(
                    reinterpret_cast<const int32_t*>( &aBoundsMin1[j] ) );
            int32x4_t bmax1 = vld1q_s32(
                    reinterpret_cast<const int32_t*>( &aBoundsMax1[j] ) );

            // Any separation axis proves non-overlap (vcgtq_s32 yields all-1s on true)
            uint32x4_t fail = vorrq_u32(
                    vorrq_u32( vcgtq_s32( bmin0, qmax0 ),
                               vcgtq_s32( qmin0, bmax0 ) ),
                    vorrq_u32( vcgtq_s32( bmin1, qmax1 ),
                               vcgtq_s32( qmin1, bmax1 ) ) );

            // Extract sign bit (0 or 1) from each lane, shift to bit position
            uint32x4_t bits = vshrq_n_u32( fail, 31 );
            uint32x4_t positioned = vshlq_u32( bits, shifts );

            // OR-reduce 4 lanes into a 4-bit fail mask
            uint32x2_t half = vorr_u32( vget_low_u32( positioned ),
                                        vget_high_u32( positioned ) );
            uint32_t failMask = vget_lane_u32( half, 0 ) | vget_lane_u32( half, 1 );

            mask |= static_cast<uint32_t>( ~failMask & 0xF ) << j;
        }

        return mask & ( ( 1U << aCount ) - 1 );
    }
    else
#endif
    {
        // Scalar fallback with a two-pass structure for auto-vectorization.
        //
        // The first loop performs branchless comparisons across all FANOUT slots,
        // writing boolean results to a flat array.  This fixed-trip-count pattern
        // without data-dependent exits allows GCC and Clang to auto-vectorize the
        // comparisons even at baseline SSE2/NEON instruction levels.
        //
        // The second loop packs the booleans into a bitmask.  The variable-shift
        // bit-packing is inherently scalar but operates on only FANOUT iterations.
        int result[FANOUT];

        for( int i = 0; i < FANOUT; ++i )
        {
            result[i] = ( aBoundsMin0[i] <= aMax[0] )
                        & ( aBoundsMax0[i] >= aMin[0] )
                        & ( aBoundsMin1[i] <= aMax[1] )
                        & ( aBoundsMax1[i] >= aMin[1] );
        }

        uint32_t mask = 0;

        for( int i = 0; i < FANOUT; ++i )
            mask |= ( static_cast<uint32_t>( result[i] ) << i );

        return mask & ( ( 1U << aCount ) - 1 );
    }
}
279
280
// Forward declaration (defined earlier in this header); repeated here so the
// node struct is self-contained for tooling that processes it in isolation.
template <class ELEMTYPE, int FANOUT>
inline uint32_t OverlapMask2D( const ELEMTYPE* aBoundsMin0, const ELEMTYPE* aBoundsMax0,
                               const ELEMTYPE* aBoundsMin1, const ELEMTYPE* aBoundsMax1,
                               int aCount,
                               const ELEMTYPE aMin[2], const ELEMTYPE aMax[2] );

/**
 * A fixed-capacity R-tree node with struct-of-arrays bounding boxes, so all
 * children's bounds along one axis/side are contiguous (SIMD-friendly, see
 * ChildOverlapMask()).  A node is a leaf (level == 0) holding DATATYPE
 * payloads, or an internal node (level > 0) holding child node pointers;
 * the two share storage via the anonymous union.
 */
template <class DATATYPE, class ELEMTYPE, int NUMDIMS, int MAXNODES>
struct RTREE_NODE
{
    static constexpr int MINNODES = MAXNODES * 2 / 5; // ~40%, R*-tree convention

    int count = 0;                  ///< Number of valid children
    int level = 0;                  ///< 0 = leaf, higher = internal

    /// SoA bounding boxes: bounds[axis*2+side][slot]
    /// side 0 = min, side 1 = max
    ELEMTYPE bounds[NUMDIMS * 2][MAXNODES];

    union
    {
        RTREE_NODE* children[MAXNODES]; ///< Internal node children
        DATATYPE data[MAXNODES];        ///< Leaf node data
    };

    /// Stored insertion bboxes for leaf entries, enabling O(log N) removal
    /// even when the item has moved since insertion
    ELEMTYPE insertBounds[NUMDIMS * 2][MAXNODES];

    /// Reference count for CoW support.  Initialized to 1: the creator holds
    /// the sole reference.  (Previously left uninitialized — a
    /// default-constructed std::atomic is indeterminate in C++17, so any
    /// read before the first store was undefined.)
    /// NOTE(review): confirm an initial count of 1 matches the owning tree's
    /// CoW protocol.
    std::atomic<int> refcount{ 1 };

    /// Zero the bounds arrays and the child/data union (count and level are
    /// zeroed by their member initializers)
    RTREE_NODE()
    {
        std::memset( bounds, 0, sizeof( bounds ) );
        std::memset( insertBounds, 0, sizeof( insertBounds ) );
        std::memset( &children, 0, sizeof( children ) );
    }

    bool IsLeaf() const { return level == 0; }
    bool IsInternal() const { return level > 0; }
    bool IsFull() const { return count >= MAXNODES; }
    bool IsUnderflow() const { return count < MINNODES; }

    /**
     * Compute the bounding box that encloses all children in this node.
     * With count == 0 the result is the inverted "empty" box
     * (min = numeric max, max = numeric lowest).
     */
    void ComputeEnclosingBounds( ELEMTYPE aMin[NUMDIMS], ELEMTYPE aMax[NUMDIMS] ) const
    {
        for( int d = 0; d < NUMDIMS; ++d )
        {
            aMin[d] = std::numeric_limits<ELEMTYPE>::max();
            aMax[d] = std::numeric_limits<ELEMTYPE>::lowest();
        }

        for( int i = 0; i < count; ++i )
        {
            for( int d = 0; d < NUMDIMS; ++d )
            {
                if( bounds[d * 2][i] < aMin[d] )
                    aMin[d] = bounds[d * 2][i];

                if( bounds[d * 2 + 1][i] > aMax[d] )
                    aMax[d] = bounds[d * 2 + 1][i];
            }
        }
    }

    /**
     * Compute the area (volume for 3+ dimensions) of child slot i's
     * bounding box, widened to 64 bits to avoid intermediate overflow.
     */
    int64_t ChildArea( int i ) const
    {
        int64_t area = 1;

        for( int d = 0; d < NUMDIMS; ++d )
            area *= static_cast<int64_t>( bounds[d * 2 + 1][i] ) - bounds[d * 2][i];

        return area;
    }

    /**
     * Test whether child slot i's bounding box overlaps the given query box
     * (closed intervals: touching boxes count as overlapping).
     */
    bool ChildOverlaps( int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS] ) const
    {
        for( int d = 0; d < NUMDIMS; ++d )
        {
            if( bounds[d * 2][i] > aMax[d] || bounds[d * 2 + 1][i] < aMin[d] )
                return false;
        }

        return true;
    }

    /**
     * Bitmask of children whose bounding boxes overlap the query rectangle;
     * bit i corresponds to child slot i.  Uses the SIMD-accelerated
     * OverlapMask2D() in 2D, a scalar loop otherwise.
     */
    uint32_t ChildOverlapMask( const ELEMTYPE aMin[NUMDIMS],
                               const ELEMTYPE aMax[NUMDIMS] ) const
    {
        if constexpr( NUMDIMS == 2 )
        {
            return OverlapMask2D<ELEMTYPE, MAXNODES>(
                    bounds[0], bounds[1], bounds[2], bounds[3], count, aMin, aMax );
        }
        else
        {
            uint32_t mask = 0;

            for( int i = 0; i < count; ++i )
            {
                if( ChildOverlaps( i, aMin, aMax ) )
                    mask |= ( 1U << i );
            }

            return mask;
        }
    }

    /**
     * Compute the overlap area between child slot i and the given box;
     * returns 0 when they do not intersect.
     */
    int64_t ChildOverlapArea( int i, const ELEMTYPE aMin[NUMDIMS],
                              const ELEMTYPE aMax[NUMDIMS] ) const
    {
        int64_t overlap = 1;

        for( int d = 0; d < NUMDIMS; ++d )
        {
            ELEMTYPE lo = std::max( bounds[d * 2][i], aMin[d] );
            ELEMTYPE hi = std::min( bounds[d * 2 + 1][i], aMax[d] );

            if( lo > hi )
                return 0;

            overlap *= static_cast<int64_t>( hi ) - lo;
        }

        return overlap;
    }

    /**
     * Compute how much child slot i's area would increase if it were
     * enlarged to also cover the given box (R-tree choose-subtree metric).
     */
    int64_t ChildEnlargement( int i, const ELEMTYPE aMin[NUMDIMS],
                              const ELEMTYPE aMax[NUMDIMS] ) const
    {
        int64_t originalArea = ChildArea( i );
        int64_t enlargedArea = 1;

        for( int d = 0; d < NUMDIMS; ++d )
        {
            ELEMTYPE lo = std::min( bounds[d * 2][i], aMin[d] );
            ELEMTYPE hi = std::max( bounds[d * 2 + 1][i], aMax[d] );
            enlargedArea *= static_cast<int64_t>( hi ) - lo;
        }

        return enlargedArea - originalArea;
    }

    /**
     * Compute the perimeter (margin for 3+ dimensions) of child slot i's
     * bounding box: 2 * sum of extents.
     */
    int64_t ChildPerimeter( int i ) const
    {
        int64_t perimeter = 0;

        for( int d = 0; d < NUMDIMS; ++d )
            perimeter += static_cast<int64_t>( bounds[d * 2 + 1][i] ) - bounds[d * 2][i];

        return 2 * perimeter;
    }

    /// Set the bounding box for child slot i.
    void SetChildBounds( int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS] )
    {
        for( int d = 0; d < NUMDIMS; ++d )
        {
            bounds[d * 2][i] = aMin[d];
            bounds[d * 2 + 1][i] = aMax[d];
        }
    }

    /// Get the bounding box for child slot i.
    void GetChildBounds( int i, ELEMTYPE aMin[NUMDIMS], ELEMTYPE aMax[NUMDIMS] ) const
    {
        for( int d = 0; d < NUMDIMS; ++d )
        {
            aMin[d] = bounds[d * 2][i];
            aMax[d] = bounds[d * 2 + 1][i];
        }
    }

    /// Store the insertion-time bounding box for leaf entry i.
    void SetInsertBounds( int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS] )
    {
        for( int d = 0; d < NUMDIMS; ++d )
        {
            insertBounds[d * 2][i] = aMin[d];
            insertBounds[d * 2 + 1][i] = aMax[d];
        }
    }

    /// Get the stored insertion-time bounding box for leaf entry i.
    void GetInsertBounds( int i, ELEMTYPE aMin[NUMDIMS], ELEMTYPE aMax[NUMDIMS] ) const
    {
        for( int d = 0; d < NUMDIMS; ++d )
        {
            aMin[d] = insertBounds[d * 2][i];
            aMax[d] = insertBounds[d * 2 + 1][i];
        }
    }

    /**
     * Remove the child at slot i by swapping the last entry into its place
     * (order is not preserved).  i must satisfy 0 <= i < count.
     */
    void RemoveChild( int i )
    {
        assert( i >= 0 && i < count ); // also reject negative indices
        int last = count - 1;

        if( i != last )
        {
            for( int d = 0; d < NUMDIMS * 2; ++d )
            {
                bounds[d][i] = bounds[d][last];
                insertBounds[d][i] = insertBounds[d][last];
            }

            if( IsLeaf() )
                data[i] = data[last];
            else
                children[i] = children[last];
        }

        count--;
    }
};
539
540
/**
 * Page-based slab allocator for fixed-size tree nodes.
 *
 * Nodes are carved out of cache-line-aligned pages of NODES_PER_PAGE slots.
 * Freed nodes go onto a LIFO free list for O(1) reuse; a per-page bitset
 * records which slots are live so the page-scan allocation path and the
 * free list stay consistent.  Not thread-safe.
 *
 * NOTE(review): slots are default-constructed when a PAGE is created and
 * constructed again by placement-new on allocation without an intervening
 * destructor call — this presumes NODE is trivially destructible; confirm
 * if NODE ever grows a non-trivial destructor.
 */
template <class NODE>
class SLAB_ALLOCATOR
{
public:
    static constexpr size_t NODES_PER_PAGE = 256;

    /// One allocation page: a fixed slot array plus a liveness bitset.
    struct PAGE
    {
        alignas( 64 ) NODE nodes[NODES_PER_PAGE];
        std::bitset<NODES_PER_PAGE> used;

        PAGE() : used() {}
    };

    SLAB_ALLOCATOR() = default;

    ~SLAB_ALLOCATOR() = default;

    // Non-copyable: handed-out node pointers refer directly to page storage
    SLAB_ALLOCATOR( const SLAB_ALLOCATOR& ) = delete;
    SLAB_ALLOCATOR& operator=( const SLAB_ALLOCATOR& ) = delete;

    // Movable: page ownership transfers wholesale, so node pointers handed
    // out before the move remain valid afterwards
    SLAB_ALLOCATOR( SLAB_ALLOCATOR&& aOther ) noexcept :
            m_pages( std::move( aOther.m_pages ) ),
            m_freeList( std::move( aOther.m_freeList ) )
    {
    }

    SLAB_ALLOCATOR& operator=( SLAB_ALLOCATOR&& aOther ) noexcept
    {
        if( this != &aOther )
        {
            m_pages = std::move( aOther.m_pages );
            m_freeList = std::move( aOther.m_freeList );
        }

        return *this;
    }

    /**
     * Allocate a new node, either from the free list or from a page.
     * Preference order: most recently freed slot (LIFO), then the lowest
     * free slot of the first non-full page, then slot 0 of a fresh page.
     */
    NODE* Allocate()
    {
        // Fast path: recycle the most recently freed slot
        if( !m_freeList.empty() )
        {
            NODE* slot = m_freeList.back();
            m_freeList.pop_back();

            // Re-mark as used in the owning page's bitset
            markUsed( slot );

            return new( slot ) NODE();
        }

        // Slow path: first free slot in the first non-full page
        for( auto& pg : m_pages )
        {
            if( pg->used.count() == NODES_PER_PAGE )
                continue;

            for( size_t slot = 0; slot < NODES_PER_PAGE; ++slot )
            {
                if( pg->used[slot] )
                    continue;

                pg->used.set( slot );
                return new( &pg->nodes[slot] ) NODE();
            }
        }

        // Everything is full: grow by one page and hand out its first slot
        m_pages.push_back( std::make_unique<PAGE>() );
        PAGE& fresh = *m_pages.back();
        fresh.used.set( 0 );
        return new( &fresh.nodes[0] ) NODE();
    }

    /**
     * Return a node to the free list for reuse.  Accepts nullptr as a no-op.
     */
    void Free( NODE* aNode )
    {
        if( aNode == nullptr )
            return;

        // Clear the used bit so the page-scan fallback path stays consistent
        markUnused( aNode );

        aNode->~NODE();
        m_freeList.push_back( aNode );
    }

    /**
     * Return approximate memory usage in bytes (page storage plus free-list
     * bookkeeping; excludes the vectors' own fixed overhead).
     */
    size_t MemoryUsage() const
    {
        size_t total = m_pages.size() * sizeof( PAGE );
        total += m_freeList.capacity() * sizeof( NODE* );
        return total;
    }

    /**
     * Check whether aNode was allocated from one of this allocator's pages.
     */
    bool Owns( const NODE* aNode ) const
    {
        size_t unused = 0;
        return findOwningPage( aNode, unused ) != nullptr;
    }

private:
    /**
     * Find the page containing aNode via integer address-range comparison
     * (avoids UB from relational comparison of unrelated pointers).  On
     * success aOffset receives the node's slot index within the page.
     */
    PAGE* findOwningPage( const NODE* aNode, size_t& aOffset ) const
    {
        const uintptr_t where = reinterpret_cast<uintptr_t>( aNode );

        for( const auto& pg : m_pages )
        {
            const uintptr_t first = reinterpret_cast<uintptr_t>( &pg->nodes[0] );
            const uintptr_t limit = reinterpret_cast<uintptr_t>( &pg->nodes[NODES_PER_PAGE] );

            if( where < first || where >= limit )
                continue;

            // Pointer subtraction is well-defined here: both point into
            // the same array
            aOffset = static_cast<size_t>( aNode - &pg->nodes[0] );
            return pg.get();
        }

        return nullptr;
    }

    /// Set the liveness bit for aNode in its owning page, if any.
    void markUsed( NODE* aNode )
    {
        size_t slot = 0;

        if( PAGE* pg = findOwningPage( aNode, slot ) )
            pg->used.set( slot );
    }

    /// Clear the liveness bit for aNode in its owning page, if any.
    void markUnused( NODE* aNode )
    {
        size_t slot = 0;

        if( PAGE* pg = findOwningPage( aNode, slot ) )
            pg->used.reset( slot );
    }

    std::vector<std::unique_ptr<PAGE>> m_pages;
    std::vector<NODE*> m_freeList;
};
713
714} // namespace KIRTREE
715
716#endif // RTREE_NODE_H
NODE * Allocate()
Allocate a new node, either from the free list or from a new page.
Definition rtree_node.h:593
static constexpr size_t NODES_PER_PAGE
Definition rtree_node.h:554
size_t MemoryUsage() const
Return approximate memory usage in bytes.
Definition rtree_node.h:652
SLAB_ALLOCATOR & operator=(SLAB_ALLOCATOR &&aOther) noexcept
Definition rtree_node.h:579
std::vector< std::unique_ptr< PAGE > > m_pages
Definition rtree_node.h:710
SLAB_ALLOCATOR & operator=(const SLAB_ALLOCATOR &)=delete
void markUnused(NODE *aNode)
Definition rtree_node.h:701
bool Owns(const NODE *aNode) const
Check if a node was allocated from one of this allocator's pages.
Definition rtree_node.h:661
SLAB_ALLOCATOR(SLAB_ALLOCATOR &&aOther) noexcept
Definition rtree_node.h:573
void Free(NODE *aNode)
Return a node to the free list for reuse.
Definition rtree_node.h:637
SLAB_ALLOCATOR(const SLAB_ALLOCATOR &)=delete
PAGE * findOwningPage(const NODE *aNode, size_t &aOffset) const
Find the page that contains aNode using address-range comparison (no UB).
Definition rtree_node.h:672
std::vector< NODE * > m_freeList
Definition rtree_node.h:711
void markUsed(NODE *aNode)
Definition rtree_node.h:692
uint64_t HilbertND2D(int aOrder, const uint32_t aCoords[NUMDIMS])
Compute Hilbert index for N-dimensional coordinates.
Definition rtree_node.h:101
uint64_t HilbertXY2D(int aOrder, uint32_t aX, uint32_t aY)
Compute a 64-bit Hilbert curve index from 2D unsigned coordinates.
Definition rtree_node.h:57
uint32_t OverlapMask2D(const ELEMTYPE *aBoundsMin0, const ELEMTYPE *aBoundsMax0, const ELEMTYPE *aBoundsMin1, const ELEMTYPE *aBoundsMax1, int aCount, const ELEMTYPE aMin[2], const ELEMTYPE aMax[2])
Compute a bitmask of which child slots overlap a 2D query rectangle.
Definition rtree_node.h:148
int64_t ChildOverlapArea(int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS]) const
Compute the overlap area between child slot i and the given box.
Definition rtree_node.h:413
bool IsUnderflow() const
Definition rtree_node.h:331
bool IsInternal() const
Definition rtree_node.h:329
int64_t ChildEnlargement(int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS]) const
Compute how much child slot i's area would increase if it were enlarged to include the given bounding...
Definition rtree_node.h:436
int64_t ChildPerimeter(int i) const
Compute the perimeter (or margin for 3D) of child slot i's bounding box.
Definition rtree_node.h:456
int64_t ChildArea(int i) const
Compute the area (or volume for 3D) of child slot i's bounding box.
Definition rtree_node.h:360
void GetChildBounds(int i, ELEMTYPE aMin[NUMDIMS], ELEMTYPE aMax[NUMDIMS]) const
Get the bounding box for child slot i.
Definition rtree_node.h:481
bool IsFull() const
Definition rtree_node.h:330
uint32_t ChildOverlapMask(const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS]) const
Bitmask of children whose bounding boxes overlap the query rectangle.
Definition rtree_node.h:388
bool ChildOverlaps(int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS]) const
Test whether child slot i's bounding box overlaps with the given query box.
Definition rtree_node.h:373
bool IsLeaf() const
Definition rtree_node.h:328
void SetChildBounds(int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS])
Set the bounding box for child slot i.
Definition rtree_node.h:469
void SetInsertBounds(int i, const ELEMTYPE aMin[NUMDIMS], const ELEMTYPE aMax[NUMDIMS])
Store the insertion bounding box for leaf entry i.
Definition rtree_node.h:493
void GetInsertBounds(int i, ELEMTYPE aMin[NUMDIMS], ELEMTYPE aMax[NUMDIMS]) const
Get the stored insertion bounding box for leaf entry i.
Definition rtree_node.h:505
void ComputeEnclosingBounds(ELEMTYPE aMin[NUMDIMS], ELEMTYPE aMax[NUMDIMS]) const
Compute the bounding box that encloses all children in this node.
Definition rtree_node.h:336
void RemoveChild(int i)
Remove child at slot i by swapping with last entry.
Definition rtree_node.h:517
NODE nodes[NODES_PER_PAGE]
Definition rtree_node.h:558
std::bitset< NODES_PER_PAGE > used
Definition rtree_node.h:559
VECTOR2I end
wxString result
Test unit parsing edge cases and error handling.