@@ -415,50 +415,77 @@ stderr_last_error(char *msg)
 /* OS Allocation */
 /*****************/
 
-#ifndef HAVE_MREMAP
-static void *zend_mm_mmap_fixed(void *addr, size_t size)
+static void zend_mm_munmap(void *addr, size_t size)
 {
 #ifdef _WIN32
-	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+	MEMORY_BASIC_INFORMATION mbi;
+	if (VirtualQuery(addr, &mbi, sizeof(mbi)) == 0) {
+#if ZEND_MM_ERROR
+		stderr_last_error("VirtualQuery() failed");
+#endif
+	}
+	addr = mbi.AllocationBase;
+
+	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+#if ZEND_MM_ERROR
+		stderr_last_error("VirtualFree() failed");
+#endif
+	}
 #else
-	int flags = MAP_PRIVATE | MAP_ANON;
-#if defined(MAP_EXCL)
-	flags |= MAP_FIXED | MAP_EXCL;
+	if (munmap(addr, size) != 0) {
+#if ZEND_MM_ERROR
+		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
 #endif
-	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
-	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);
+	}
+#endif
+}
 
-	if (ptr == MAP_FAILED) {
-#if ZEND_MM_ERROR && !defined(MAP_EXCL)
-		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
+#ifdef _WIN32
+static void *zend_mm_mmap_win_reserve(size_t size)
+{
+	void *ptr = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE);
+	if (ptr == NULL) {
+#if ZEND_MM_ERROR
+		stderr_last_error("VirtualAlloc() reserve failed");
 #endif
 		return NULL;
-	} else if (ptr != addr) {
-		if (munmap(ptr, size) != 0) {
+	}
+	return ptr;
+}
+
+#ifndef _WIN64
+static void *zend_mm_mmap_win_reserve_fixed(void *addr, size_t size)
+{
+	void *ptr = VirtualAlloc(addr, size, MEM_RESERVE, PAGE_READWRITE);
+	if (ptr == NULL) {
+		/** ERROR_INVALID_ADDRESS is expected when addr - addr+size is not free */
+		if (GetLastError() != ERROR_INVALID_ADDRESS) {
 #if ZEND_MM_ERROR
-			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
+			stderr_last_error("VirtualAlloc() reserve fixed failed");
 #endif
 		}
+		SetLastError(0);
 		return NULL;
 	}
 	return ptr;
-#endif
 }
 #endif
 
-static void *zend_mm_mmap(size_t size)
+static void *zend_mm_mmap_win_commit(void *addr, size_t size)
 {
-#ifdef _WIN32
-	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
-
+	void *ptr = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
 	if (ptr == NULL) {
 #if ZEND_MM_ERROR
-		stderr_last_error("VirtualAlloc() failed");
+		stderr_last_error("VirtualAlloc() commit failed");
 #endif
 		return NULL;
 	}
+	ZEND_ASSERT(ptr == addr);
 	return ptr;
-#else
+}
+#else /* not Windows */
+static void *zend_mm_mmap(size_t size)
+{
 	void *ptr;
 
 #ifdef MAP_HUGETLB
@@ -471,33 +498,39 @@ static void *zend_mm_mmap(size_t size)
 #endif
 
 	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);
-
 	if (ptr == MAP_FAILED) {
 #if ZEND_MM_ERROR
 		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
 #endif
 		return NULL;
 	}
 	return ptr;
-#endif
 }
+#endif
 
-static void zend_mm_munmap(void *addr, size_t size)
+#ifndef HAVE_MREMAP
+#ifndef _WIN32
+static void *zend_mm_mmap_fixed(void *addr, size_t size)
 {
-#ifdef _WIN32
-	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
-#if ZEND_MM_ERROR
-		stderr_last_error("VirtualFree() failed");
+	int flags = MAP_PRIVATE | MAP_ANON;
+#if defined(MAP_EXCL)
+	flags |= MAP_FIXED | MAP_EXCL;
 #endif
-	}
-#else
-	if (munmap(addr, size) != 0) {
-#if ZEND_MM_ERROR
-		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
+	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
+	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);
+	if (ptr == MAP_FAILED) {
+#if ZEND_MM_ERROR && !defined(MAP_EXCL)
+		fprintf(stderr, "\nmmap() fixed failed: [%d] %s\n", errno, strerror(errno));
 #endif
+		return NULL;
+	} else if (ptr != addr) {
+		zend_mm_munmap(ptr, size);
+		return NULL;
 	}
-#endif
+	return ptr;
 }
+#endif
+#endif
 
 /***********/
 /* Bitmask */
@@ -663,35 +696,47 @@ static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitse
 
 static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 {
-	void *ptr = zend_mm_mmap(size);
-
+#ifdef _WIN32
+	void *ptr = NULL;
+	size_t offset;
+#ifndef _WIN64
+	/* 32-bit build has limited memory address space and partial deallocation is not supported,
+	 * try to reserve exact aligned size first, GH-9650 */
+	ptr = zend_mm_mmap_win_reserve(size + alignment);
 	if (ptr == NULL) {
 		return NULL;
-	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
-#ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages) {
-			madvise(ptr, size, MADV_HUGEPAGE);
-		}
-#endif
-		return ptr;
-	} else {
-		size_t offset;
+	}
 
-		/* chunk has to be aligned */
-		zend_mm_munmap(ptr, size);
-		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
-#ifdef _WIN32
-		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
-		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
-		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
-		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
-		if (offset != 0) {
-			zend_mm_munmap(ptr, size);
+	offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
+	if (offset != 0) {
+		offset = alignment - offset;
+	}
+	zend_mm_munmap(ptr, size + alignment);
+	ptr = zend_mm_mmap_win_reserve_fixed((void*)((char*)ptr + offset), size);
+#endif
+	if (ptr == NULL) {
+		ptr = zend_mm_mmap_win_reserve(size + alignment);
+		if (ptr == NULL) {
 			return NULL;
 		}
-		return ptr;
+	}
+
+	offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
+	if (offset != 0) {
+		offset = alignment - offset;
+	}
+	return zend_mm_mmap_win_commit((void*)((char*)ptr + offset), size);
 #else
-		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
+	void *ptr = zend_mm_mmap(size);
+	if (ptr == NULL) {
+		return NULL;
+	}
+
+	if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) != 0) {
+		zend_mm_munmap(ptr, size);
+		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
+
+		size_t offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
 		if (offset != 0) {
 			offset = alignment - offset;
 			zend_mm_munmap(ptr, offset);
@@ -701,14 +746,16 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 		if (alignment > REAL_PAGE_SIZE) {
 			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
 		}
-# ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages) {
-			madvise(ptr, size, MADV_HUGEPAGE);
-		}
-# endif
-#endif
-		return ptr;
 	}
+
+#ifdef MADV_HUGEPAGE
+	if (zend_mm_use_huge_pages) {
+		madvise(ptr, size, MADV_HUGEPAGE);
+	}
+#endif
+
+	return ptr;
+#endif
 }
 
 static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
@@ -1846,11 +1893,7 @@ static zend_mm_heap *zend_mm_init(void)
 
 	if (UNEXPECTED(chunk == NULL)) {
 #if ZEND_MM_ERROR
-#ifdef _WIN32
-		stderr_last_error("Can't initialize heap");
-#else
-		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
-#endif
+		fprintf(stderr, "Can't initialize heap\n");
 #endif
 		return NULL;
 	}
@@ -2978,11 +3021,7 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void
 	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
 	if (UNEXPECTED(chunk == NULL)) {
 #if ZEND_MM_ERROR
-#ifdef _WIN32
-		stderr_last_error("Can't initialize heap");
-#else
-		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
-#endif
+		fprintf(stderr, "Can't initialize heap\n");
 #endif
 		return NULL;
 	}
@@ -3025,11 +3064,7 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void
 	if (!storage) {
 		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
 #if ZEND_MM_ERROR
-#ifdef _WIN32
-		stderr_last_error("Can't initialize heap");
-#else
-		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
-#endif
+		fprintf(stderr, "Can't initialize heap\n");
 #endif
 		return NULL;
 	}
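
For reference, the alignment arithmetic that the patched zend_mm_chunk_alloc_int() repeats (compute the offset past the previous boundary, then the distance forward to the next one) can be tried in isolation. A minimal standalone sketch, assuming ZEND_MM_ALIGNED_OFFSET is the usual power-of-two mask ((size_t)ptr & (alignment - 1)) and using a made-up base address rather than anything from zend_alloc.c:

#include <stdint.h>
#include <stdio.h>

/* Offset of p past the previous alignment boundary; alignment must be a
 * power of two (ZEND_MM_CHUNK_SIZE is 2 MB in zend_alloc.c). */
static size_t aligned_offset(uintptr_t p, size_t alignment)
{
	return (size_t)(p & (alignment - 1));
}

int main(void)
{
	uintptr_t base = 0x14203000;        /* hypothetical address returned by a reservation */
	size_t alignment = 2 * 1024 * 1024;

	/* Same computation as the patch:
	 *   offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
	 *   if (offset != 0) { offset = alignment - offset; }
	 * i.e. how far to move forward to reach the next aligned address. */
	size_t offset = aligned_offset(base, alignment);
	if (offset != 0) {
		offset = alignment - offset;
	}
	printf("base %#zx -> aligned chunk start %#zx\n", (size_t)base, (size_t)(base + offset));
	return 0;
}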
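
The Windows part of the patch also splits the old single MEM_COMMIT | MEM_RESERVE VirtualAlloc() call into a reserve step (zend_mm_mmap_win_reserve / zend_mm_mmap_win_reserve_fixed) and a commit step (zend_mm_mmap_win_commit), so that an aligned chunk can be committed inside a larger reservation. A rough self-contained sketch of that pattern; sizes and the error handling here are illustrative, not the zend_alloc.c values:

#include <windows.h>
#include <stdio.h>

int main(void)
{
	SIZE_T size = 2 * 1024 * 1024;
	SIZE_T alignment = 2 * 1024 * 1024;

	/* Reserve address space only; no physical pages are committed yet. */
	char *base = VirtualAlloc(NULL, size + alignment, MEM_RESERVE, PAGE_READWRITE);
	if (base == NULL) {
		fprintf(stderr, "reserve failed: %lu\n", GetLastError());
		return 1;
	}

	/* Round up to the next alignment boundary inside the reservation. */
	SIZE_T offset = (SIZE_T)base & (alignment - 1);
	if (offset != 0) {
		offset = alignment - offset;
	}

	/* Commit only the aligned chunk; the rest of the region stays reserved. */
	void *chunk = VirtualAlloc(base + offset, size, MEM_COMMIT, PAGE_READWRITE);
	if (chunk == NULL) {
		fprintf(stderr, "commit failed: %lu\n", GetLastError());
		VirtualFree(base, 0, MEM_RELEASE);
		return 1;
	}

	/* MEM_RELEASE must target the allocation base of the reservation, which is
	 * why the patched zend_mm_munmap() looks it up with VirtualQuery() first. */
	VirtualFree(base, 0, MEM_RELEASE);
	return 0;
}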