github.com/goccy/go-jit@v0.0.0-20200514131505-ff78d45cf6af/internal/ccall/jit-memory-cache.c

/*
 * jit-cache.c - Translated function cache implementation.
 *
 * Copyright (C) 2002, 2003, 2008  Southern Storm Software, Pty Ltd.
 * Copyright (C) 2012  Aleksey Demakov
 *
 * This file is part of the libjit library.
 *
 * The libjit library is free software: you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * The libjit library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with the libjit library.  If not, see
 * <http://www.gnu.org/licenses/>.
 */

/*
See the bottom of this file for documentation on the cache system.
*/

#include "jit-internal.h"
#include "jit-apply-func.h"

#include <stddef.h> /* for offsetof */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Tune the default size of a cache page.  Memory is allocated from
 * the system in chunks of this size.
 */
#ifndef JIT_CACHE_PAGE_SIZE
#define JIT_CACHE_PAGE_SIZE (64 * 1024)
#endif

/*
 * Tune the maximum size of a cache page.  The size of a page might be
 * up to (JIT_CACHE_PAGE_SIZE * JIT_CACHE_MAX_PAGE_FACTOR).  This will
 * also determine the maximum method size that can be translated.
 */
#ifndef JIT_CACHE_MAX_PAGE_FACTOR
#define JIT_CACHE_MAX_PAGE_FACTOR 1024
#endif
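/*
 * Illustrative note (added comment, not original source): with the
 * defaults above, a single multi-page allocation tops out at
 * JIT_CACHE_PAGE_SIZE x JIT_CACHE_MAX_PAGE_FACTOR = 64 KB x 1024 = 64 MB,
 * which therefore also bounds the largest single function that this
 * cache can hold.
 */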
/*
 * Method information block, organised as a red-black tree node.
 * There may be more than one such block associated with a method
 * if the method contains exception regions.
 */
typedef struct jit_cache_node *jit_cache_node_t;
struct jit_cache_node
{
	jit_cache_node_t left;   /* Left sub-tree and red/black bit */
	jit_cache_node_t right;  /* Right sub-tree */
	unsigned char *start;    /* Start of the cache region */
	unsigned char *end;      /* End of the cache region */
	jit_function_t func;     /* Function info block slot */
};

/*
 * Structure of the page list entry.
 */
struct jit_cache_page
{
	void *page;   /* Page memory */
	long factor;  /* Page size factor */
};

/*
 * Structure of the method cache.
 */
typedef struct jit_cache *jit_cache_t;
struct jit_cache
{
	struct jit_cache_page *pages; /* List of pages currently in the cache */
	unsigned long numPages;       /* Number of pages currently in the cache */
	unsigned long maxNumPages;    /* Maximum number of pages that could be in the list */
	unsigned long pageSize;       /* Default size of a page for allocation */
	unsigned int maxPageFactor;   /* Maximum page size factor */
	long pagesLeft;               /* Number of pages left to allocate */
	unsigned char *free_start;    /* Current start of the free region */
	unsigned char *free_end;      /* Current end of the free region */
	unsigned char *prev_start;    /* Previous start of the free region */
	unsigned char *prev_end;      /* Previous end of the free region */
	jit_cache_node_t node;        /* Information for the current function */
	struct jit_cache_node head;   /* Head of the lookup tree */
	struct jit_cache_node nil;    /* Nil pointer for the lookup tree */
};

/*
 * Get or set the sub-trees of a node.
 */
#define GetLeft(node) \
	((jit_cache_node_t)(((jit_nuint)(node)->left) & ~((jit_nuint)1)))
#define GetRight(node) \
	((node)->right)
#define SetLeft(node,value) \
	((node)->left = (jit_cache_node_t)(((jit_nuint)value) | \
			(((jit_nuint)(node)->left) & ((jit_nuint)1))))
#define SetRight(node,value) \
	((node)->right = (value))

/*
 * Get or set the red/black state of a node.
 */
#define GetRed(node) \
	((((jit_nuint)((node)->left)) & ((jit_nuint)1)) != 0)
#define SetRed(node) \
	((node)->left = (jit_cache_node_t)(((jit_nuint)(node)->left) | ((jit_nuint)1)))
#define SetBlack(node) \
	((node)->left = (jit_cache_node_t)(((jit_nuint)(node)->left) & ~((jit_nuint)1)))

void _jit_cache_destroy(jit_cache_t cache);
void *_jit_cache_alloc_data(jit_cache_t cache, unsigned long size, unsigned long align);
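/*
 * Illustration (added comment, not original source): node pointers are at
 * least pointer-aligned, so bit 0 of "left" is always zero and is free to
 * carry the node's colour.  A freshly inserted node is made red like this:
 *
 *	node->left = (jit_cache_node_t)(((jit_nuint)nil) | ((jit_nuint)1));
 *	GetRed(node);   -- tests bit 0: non-zero, so the node is red
 *	GetLeft(node);  -- masks bit 0 away and yields the real child, nil
 */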
/*
 * Allocate a cache page and add it to the cache.
 */
static void
AllocCachePage(jit_cache_t cache, int factor)
{
	long num;
	unsigned char *ptr;
	struct jit_cache_page *list;

	/* The minimum page factor is 1 */
	if(factor <= 0)
	{
		factor = 1;
	}

	/* If too big a page is requested, then bail out */
	if(((unsigned int) factor) > cache->maxPageFactor)
	{
		goto failAlloc;
	}

	/* If the page limit is hit, then bail out */
	if(cache->pagesLeft >= 0 && cache->pagesLeft < factor)
	{
		goto failAlloc;
	}

	/* Try to allocate a physical page */
	ptr = (unsigned char *) _jit_malloc_exec((unsigned int) cache->pageSize * factor);
	if(!ptr)
	{
		goto failAlloc;
	}

	/* Add the page to the page list.  We keep this in an array
	   that is separate from the pages themselves so that we don't
	   have to "touch" the pages to free them.  Touching the pages
	   may cause them to be swapped in if they are currently out.
	   There's no point doing that if we are trying to free them */
	if(cache->numPages == cache->maxNumPages)
	{
		if(cache->numPages == 0)
		{
			num = 16;
		}
		else
		{
			num = cache->numPages * 2;
		}
		if(cache->pagesLeft > 0 && num > (cache->numPages + cache->pagesLeft - factor + 1))
		{
			num = cache->numPages + cache->pagesLeft - factor + 1;
		}

		list = (struct jit_cache_page *) jit_realloc(cache->pages,
							     sizeof(struct jit_cache_page) * num);
		if(!list)
		{
			_jit_free_exec(ptr, cache->pageSize * factor);
		failAlloc:
			cache->free_start = 0;
			cache->free_end = 0;
			return;
		}

		cache->maxNumPages = num;
		cache->pages = list;
	}
	cache->pages[cache->numPages].page = ptr;
	cache->pages[cache->numPages].factor = factor;
	++(cache->numPages);

	/* Adjust the number of pages left before we hit the limit */
	if(cache->pagesLeft > 0)
	{
		cache->pagesLeft -= factor;
	}

	/* Set up the working region within the new page */
	cache->free_start = ptr;
	cache->free_end = ptr + (int) cache->pageSize * factor;
}

/*
 * Compare a key against a node, being careful of sentinel nodes.
 */
static int
CacheCompare(jit_cache_t cache, unsigned char *key, jit_cache_node_t node)
{
	if(node == &cache->nil || node == &cache->head)
	{
		/* Every key is greater than the sentinel nodes */
		return 1;
	}
	else
	{
		/* Compare a regular node */
		if(key < node->start)
		{
			return -1;
		}
		else if(key > node->start)
		{
			return 1;
		}
		else
		{
			return 0;
		}
	}
}

/*
 * Rotate a sub-tree around a specific node.
 */
static jit_cache_node_t
CacheRotate(jit_cache_t cache, unsigned char *key, jit_cache_node_t around)
{
	jit_cache_node_t child, grandChild;
	int setOnLeft;
	if(CacheCompare(cache, key, around) < 0)
	{
		child = GetLeft(around);
		setOnLeft = 1;
	}
	else
	{
		child = GetRight(around);
		setOnLeft = 0;
	}
	if(CacheCompare(cache, key, child) < 0)
	{
		grandChild = GetLeft(child);
		SetLeft(child, GetRight(grandChild));
		SetRight(grandChild, child);
	}
	else
	{
		grandChild = GetRight(child);
		SetRight(child, GetLeft(grandChild));
		SetLeft(grandChild, child);
	}
	if(setOnLeft)
	{
		SetLeft(around, grandChild);
	}
	else
	{
		SetRight(around, grandChild);
	}
	return grandChild;
}

/*
 * Split a red-black tree at the current position.
 */
#define Split() \
	do { \
		SetRed(temp); \
		SetBlack(GetLeft(temp)); \
		SetBlack(GetRight(temp)); \
		if(GetRed(parent)) \
		{ \
			SetRed(grandParent); \
			if((CacheCompare(cache, key, grandParent) < 0) != \
			   (CacheCompare(cache, key, parent) < 0)) \
			{ \
				parent = CacheRotate(cache, key, grandParent); \
			} \
			temp = CacheRotate(cache, key, greatGrandParent); \
			SetBlack(temp); \
		} \
	} while (0)
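/*
 * Added note (not original source): Split() implements the standard
 * top-down red-black insertion fix-up.  On the way down, any node whose
 * two children are both red is recoloured red while its children become
 * black; if that creates two red nodes in a row, one or two rotations
 * around the grandparent restore the invariant before the descent
 * continues, so no second pass back up the tree is ever needed.
 */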
/*
 * Add a method region block to the red-black lookup tree
 * that is associated with a method cache.
 */
static void
AddToLookupTree(jit_cache_t cache, jit_cache_node_t method)
{
	unsigned char *key = method->start;
	jit_cache_node_t temp;
	jit_cache_node_t greatGrandParent;
	jit_cache_node_t grandParent;
	jit_cache_node_t parent;
	jit_cache_node_t nil = &(cache->nil);
	int cmp;

	/* Search for the insert position */
	temp = &(cache->head);
	greatGrandParent = temp;
	grandParent = temp;
	parent = temp;
	while(temp != nil)
	{
		/* Adjust our ancestor pointers */
		greatGrandParent = grandParent;
		grandParent = parent;
		parent = temp;

		/* Compare the key against the current node */
		cmp = CacheCompare(cache, key, temp);
		if(cmp == 0)
		{
			/* This is a duplicate, which normally shouldn't happen.
			   If it does happen, then ignore the node and bail out */
			return;
		}
		else if(cmp < 0)
		{
			temp = GetLeft(temp);
		}
		else
		{
			temp = GetRight(temp);
		}

		/* Do we need to split this node? */
		if(GetRed(GetLeft(temp)) && GetRed(GetRight(temp)))
		{
			Split();
		}
	}

	/* Insert the new node into the current position */
	method->left = (jit_cache_node_t)(((jit_nuint)nil) | ((jit_nuint)1));
	method->right = nil;
	if(CacheCompare(cache, key, parent) < 0)
	{
		SetLeft(parent, method);
	}
	else
	{
		SetRight(parent, method);
	}
	Split();
	SetBlack(cache->head.right);
}

jit_cache_t
_jit_cache_create(jit_context_t context)
{
	jit_cache_t cache;
	long limit, cache_page_size;
	int max_page_factor;
	unsigned long exec_page_size;

	limit = (long)
		jit_context_get_meta_numeric(context, JIT_OPTION_CACHE_LIMIT);
	cache_page_size = (long)
		jit_context_get_meta_numeric(context, JIT_OPTION_CACHE_PAGE_SIZE);
	max_page_factor = (int)
		jit_context_get_meta_numeric(context, JIT_OPTION_CACHE_MAX_PAGE_FACTOR);

	/* Allocate space for the cache control structure */
	if((cache = (jit_cache_t) jit_malloc(sizeof(struct jit_cache))) == 0)
	{
		return 0;
	}

	/* determine the default cache page size */
	exec_page_size = jit_vmem_page_size();
	if(cache_page_size <= 0)
	{
		cache_page_size = JIT_CACHE_PAGE_SIZE;
	}
	if(cache_page_size < exec_page_size)
	{
		cache_page_size = exec_page_size;
	}
	else
	{
		cache_page_size = (cache_page_size / exec_page_size) * exec_page_size;
	}

	/* determine the maximum page size factor */
	if(max_page_factor <= 0)
	{
		max_page_factor = JIT_CACHE_MAX_PAGE_FACTOR;
	}

	/* Initialize the rest of the cache fields */
	cache->pages = 0;
	cache->numPages = 0;
	cache->maxNumPages = 0;
	cache->pageSize = cache_page_size;
	cache->maxPageFactor = max_page_factor;
	cache->free_start = 0;
	cache->free_end = 0;
	if(limit > 0)
	{
		cache->pagesLeft = limit / cache_page_size;
		if(cache->pagesLeft < 1)
		{
			cache->pagesLeft = 1;
		}
	}
	else
	{
		cache->pagesLeft = -1;
	}
	cache->node = 0;
	cache->nil.left = &(cache->nil);
	cache->nil.right = &(cache->nil);
	cache->nil.func = 0;
	cache->head.left = 0;
	cache->head.right = &(cache->nil);
	cache->head.func = 0;

	/* Allocate the initial cache page */
	AllocCachePage(cache, 0);
	if(!cache->free_start)
	{
		_jit_cache_destroy(cache);
		return 0;
	}

	/* Ready to go */
	return cache;
}
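/*
 * Usage sketch (an assumption about the surrounding libjit API, not part
 * of this file): the options read above are normally set on the context
 * before the cache is created, for example:
 *
 *	jit_context_t context = jit_context_create();
 *	jit_context_set_meta_numeric(context, JIT_OPTION_CACHE_LIMIT,
 *				     8 * 1024 * 1024);
 *	jit_cache_t cache = _jit_cache_create(context);
 *
 * A limit of zero (the default) leaves the cache free to grow without
 * bound, per the pagesLeft = -1 branch above.
 */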
void
_jit_cache_destroy(jit_cache_t cache)
{
	unsigned long page;

	/* Free all of the cache pages */
	for(page = 0; page < cache->numPages; ++page)
	{
		_jit_free_exec(cache->pages[page].page,
			       cache->pageSize * cache->pages[page].factor);
	}
	if(cache->pages)
	{
		jit_free(cache->pages);
	}

	/* Free the cache object itself */
	jit_free(cache);
}

int
_jit_cache_extend(jit_cache_t cache, int count)
{
	/* Compute the page size factor */
	int factor = 1 << count;

	/* Bail out if there is a started function */
	if(cache->node)
	{
		return JIT_MEMORY_ERROR;
	}

	/* If we had a newly allocated page then it has to be freed
	   to let us allocate another new page of appropriate size. */
	struct jit_cache_page *p = &cache->pages[cache->numPages - 1];
	if((cache->free_start == ((unsigned char *)p->page))
	   && (cache->free_end == (cache->free_start + cache->pageSize * p->factor)))
	{
		_jit_free_exec(p->page, cache->pageSize * p->factor);

		--(cache->numPages);
		if(cache->pagesLeft >= 0)
		{
			cache->pagesLeft += p->factor;
		}
		cache->free_start = 0;
		cache->free_end = 0;

		if(factor <= p->factor)
		{
			factor = p->factor << 1;
		}
	}

	/* Allocate a new page now */
	AllocCachePage(cache, factor);
	if(!cache->free_start)
	{
		return JIT_MEMORY_TOO_BIG;
	}
	return JIT_MEMORY_OK;
}

jit_function_t
_jit_cache_alloc_function(jit_cache_t cache)
{
	return jit_cnew(struct _jit_function);
}

void
_jit_cache_free_function(jit_cache_t cache, jit_function_t func)
{
	jit_free(func);
}

int
_jit_cache_start_function(jit_cache_t cache, jit_function_t func)
{
	/* Bail out if there is a started function already */
	if(cache->node)
	{
		return JIT_MEMORY_ERROR;
	}
	/* Bail out if the cache is already full */
	if(!cache->free_start)
	{
		return JIT_MEMORY_TOO_BIG;
	}

	/* Save the cache position */
	cache->prev_start = cache->free_start;
	cache->prev_end = cache->free_end;

	/* Allocate a new cache node */
	cache->node = _jit_cache_alloc_data(
		cache, sizeof(struct jit_cache_node), sizeof(void *));
	if(!cache->node)
	{
		return JIT_MEMORY_RESTART;
	}
	cache->node->func = func;

	/* Initialize the function information */
	cache->node->start = cache->free_start;
	cache->node->end = 0;
	cache->node->left = 0;
	cache->node->right = 0;

	return JIT_MEMORY_OK;
}

int
_jit_cache_end_function(jit_cache_t cache, int result)
{
	/* Bail out if there is no started function */
	if(!cache->node)
	{
		return JIT_MEMORY_ERROR;
	}

	/* Determine if we ran out of space while writing the function */
	if(result != JIT_MEMORY_OK)
	{
		/* Restore the saved cache position */
		cache->free_start = cache->prev_start;
		cache->free_end = cache->prev_end;
		cache->node = 0;

		return JIT_MEMORY_RESTART;
	}

	/* Update the method region block and then add it to the lookup tree */
	cache->node->end = cache->free_start;
	AddToLookupTree(cache, cache->node);
	cache->node = 0;

	/* The method is ready to go */
	return JIT_MEMORY_OK;
}

void *
_jit_cache_get_code_break(jit_cache_t cache)
{
	/* Bail out if there is no started function */
	if(!cache->node)
	{
		return 0;
	}

	/* Return the address of the available code area */
	return cache->free_start;
}
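/*
 * Sketch (an assumption, not original source): between start and end of a
 * function, a code generator typically drives the break pointers like so:
 *
 *	unsigned char *out = _jit_cache_get_code_break(cache);
 *	unsigned char *limit = _jit_cache_get_code_limit(cache);
 *	while(out + insn_size <= limit)
 *	{
 *		...emit one instruction at "out" and advance it...
 *	}
 *	_jit_cache_set_code_break(cache, out);
 *
 * Hitting the limit means the caller should fail the translation with a
 * non-OK result and let _jit_cache_end_function request a restart.
 */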
void
_jit_cache_set_code_break(jit_cache_t cache, void *ptr)
{
	/* Bail out if there is no started function */
	if(!cache->node)
	{
		return;
	}
	/* Sanity checks */
	if((unsigned char *) ptr < cache->free_start)
	{
		return;
	}
	if((unsigned char *) ptr > cache->free_end)
	{
		return;
	}

	/* Update the address of the available code area */
	cache->free_start = ptr;
}

void *
_jit_cache_get_code_limit(jit_cache_t cache)
{
	/* Bail out if there is no started function */
	if(!cache->node)
	{
		return 0;
	}

	/* Return the end address of the available code area */
	return cache->free_end;
}

void *
_jit_cache_alloc_data(jit_cache_t cache, unsigned long size, unsigned long align)
{
	unsigned char *ptr;

	/* Get memory from the top of the free region, so that it does not
	   overlap with the function code possibly being written at the bottom
	   of the free region */
	ptr = cache->free_end - size;
	ptr = (unsigned char *) (((jit_nuint) ptr) & ~(align - 1));
	if(ptr < cache->free_start)
	{
		/* When we aligned the block, it caused an overflow */
		return 0;
	}

	/* Allocate the block and return it */
	cache->free_end = ptr;
	return ptr;
}
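/*
 * Worked example (added, not original source): rounding the top-of-region
 * pointer down both reserves the block and aligns it in one step.  With
 * free_end = 0x1003A, size = 12, and align = 8:
 *
 *	ptr = 0x1003A - 12       = 0x1002E
 *	ptr = 0x1002E & ~(8 - 1) = 0x10028   -- 8-byte aligned
 *
 * free_end then becomes 0x10028, leaving [0x10028, 0x10034) for the data
 * plus 6 bytes of alignment padding above it.
 */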
static void *
alloc_code(jit_cache_t cache, unsigned int size, unsigned int align)
{
	unsigned char *ptr;

	/* Bail out if there is a started function */
	if(cache->node)
	{
		return 0;
	}
	/* Bail out if there is no cache available */
	if(!cache->free_start)
	{
		return 0;
	}

	/* Allocate aligned memory */
	ptr = cache->free_start;
	if(align > 1)
	{
		jit_nuint p = ((jit_nuint) ptr + align - 1) & ~(align - 1);
		ptr = (unsigned char *) p;
	}

	/* Do we need to allocate a new cache page? */
	if((ptr + size) > cache->free_end)
	{
		/* Allocate a new page */
		AllocCachePage(cache, 0);

		/* Bail out if the cache is full */
		if(!cache->free_start)
		{
			return 0;
		}

		/* Allocate memory from the new page */
		ptr = cache->free_start;
		if(align > 1)
		{
			jit_nuint p = ((jit_nuint) ptr + align - 1) & ~(align - 1);
			ptr = (unsigned char *) p;
		}
	}

	/* Allocate the block and return it */
	cache->free_start = ptr + size;
	return (void *) ptr;
}

void *
_jit_cache_alloc_trampoline(jit_cache_t cache)
{
	return alloc_code(cache,
			  jit_get_trampoline_size(),
			  jit_get_trampoline_alignment());
}

void
_jit_cache_free_trampoline(jit_cache_t cache, void *trampoline)
{
	/* not supported yet */
}

void *
_jit_cache_alloc_closure(jit_cache_t cache)
{
	return alloc_code(cache,
			  jit_get_closure_size(),
			  jit_get_closure_alignment());
}

void
_jit_cache_free_closure(jit_cache_t cache, void *closure)
{
	/* not supported yet */
}
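/*
 * Added note (not original source): alloc_code refuses to run while a
 * function is in progress (cache->node != 0), so trampolines and closures
 * can only be carved out of the code region between an end_function and
 * the next start_function.  Freeing them is currently a no-op, as the
 * "not supported yet" stubs above record.
 */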
#if 0
void *
_jit_cache_alloc_no_method(jit_cache_t cache, unsigned long size, unsigned long align)
{
	unsigned char *ptr;

	/* Bail out if there is a started function */
	if(cache->method)
	{
		return 0;
	}
	/* Bail out if there is no cache available */
	if(!cache->free_start)
	{
		return 0;
	}
	/* Bail out if the request is too big to ever be satisfiable */
	if((size + align - 1) > (cache->pageSize * cache->maxPageFactor))
	{
		return 0;
	}

	/* Allocate memory from the top of the current free region, so
	 * that it does not overlap with the method code being written
	 * at the bottom of the free region */
	ptr = cache->free_end - size;
	ptr = (unsigned char *) (((jit_nuint) ptr) & ~((jit_nuint) align - 1));

	/* Do we need to allocate a new cache page? */
	if(ptr < cache->free_start)
	{
		/* Find the appropriate page size */
		int factor = 1;
		while((size + align - 1) > (factor * cache->pageSize)) {
			factor <<= 1;
		}

		/* Try to allocate it */
		AllocCachePage(cache, factor);

		/* Bail out if the cache is full */
		if(!cache->free_start)
		{
			return 0;
		}

		/* Allocate memory from the new page */
		ptr = cache->free_end - size;
		ptr = (unsigned char *) (((jit_nuint) ptr) & ~((jit_nuint) align - 1));
	}

	/* Allocate the block and return it */
	cache->free_end = ptr;
	return (void *)ptr;
}
#endif

void *
_jit_cache_find_function_info(jit_cache_t cache, void *pc)
{
	jit_cache_node_t node = cache->head.right;
	while(node != &(cache->nil))
	{
		if(((unsigned char *)pc) < node->start)
		{
			node = GetLeft(node);
		}
		else if(((unsigned char *)pc) >= node->end)
		{
			node = GetRight(node);
		}
		else
		{
			return node;
		}
	}
	return 0;
}

jit_function_t
_jit_cache_get_function(jit_cache_t cache, void *func_info)
{
	if(func_info)
	{
		jit_cache_node_t node = (jit_cache_node_t) func_info;
		return node->func;
	}
	return 0;
}

void *
_jit_cache_get_function_start(jit_memory_context_t memctx, void *func_info)
{
	if(func_info)
	{
		jit_cache_node_t node = (jit_cache_node_t) func_info;
		return node->start;
	}
	return 0;
}

void *
_jit_cache_get_function_end(jit_memory_context_t memctx, void *func_info)
{
	if(func_info)
	{
		jit_cache_node_t node = (jit_cache_node_t) func_info;
		return node->end;
	}
	return 0;
}

jit_memory_manager_t
jit_default_memory_manager(void)
{
	static const struct jit_memory_manager mm = {

		(jit_memory_context_t (*)(jit_context_t))
		&_jit_cache_create,

		(void (*)(jit_memory_context_t))
		&_jit_cache_destroy,

		(jit_function_info_t (*)(jit_memory_context_t, void *))
		&_jit_cache_find_function_info,

		(jit_function_t (*)(jit_memory_context_t, jit_function_info_t))
		&_jit_cache_get_function,

		(void * (*)(jit_memory_context_t, jit_function_info_t))
		&_jit_cache_get_function_start,

		(void * (*)(jit_memory_context_t, jit_function_info_t))
		&_jit_cache_get_function_end,

		(jit_function_t (*)(jit_memory_context_t))
		&_jit_cache_alloc_function,

		(void (*)(jit_memory_context_t, jit_function_t))
		&_jit_cache_free_function,

		(int (*)(jit_memory_context_t, jit_function_t))
		&_jit_cache_start_function,

		(int (*)(jit_memory_context_t, int))
		&_jit_cache_end_function,

		(int (*)(jit_memory_context_t, int))
		&_jit_cache_extend,

		(void * (*)(jit_memory_context_t))
		&_jit_cache_get_code_limit,

		(void * (*)(jit_memory_context_t))
		&_jit_cache_get_code_break,

		(void (*)(jit_memory_context_t, void *))
		&_jit_cache_set_code_break,

		(void * (*)(jit_memory_context_t))
		&_jit_cache_alloc_trampoline,

		(void (*)(jit_memory_context_t, void *))
		&_jit_cache_free_trampoline,

		(void * (*)(jit_memory_context_t))
		&_jit_cache_alloc_closure,

		(void (*)(jit_memory_context_t, void *))
		&_jit_cache_free_closure,

		(void * (*)(jit_memory_context_t, jit_size_t, jit_size_t))
		&_jit_cache_alloc_data
	};
	return &mm;
}
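/*
 * Usage sketch (an assumption about the surrounding libjit API, not part
 * of this file): a context selects this implementation through the
 * pluggable memory-manager hook, e.g.
 *
 *	jit_context_t context = jit_context_create();
 *	jit_context_set_memory_manager(context, jit_default_memory_manager());
 *
 * The casts above only adapt jit_cache_t to the opaque
 * jit_memory_context_t handle that the jit_memory_manager table expects.
 */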
/*

Using the cache
---------------

To output the code for a method, first call _jit_cache_start_method:

	jit_cache_posn posn;
	int result;

	result = _jit_cache_start_method(cache, &posn, factor,
					 METHOD_ALIGNMENT, method);

"factor" is used to control cache space allocation for the method.
The cache space is allocated by pages.  The value 0 indicates that
the method has to use the space left after the last allocation.
The value 1 or more indicates that the method has to start on a
newly allocated space that must contain the specified number of
consecutive pages.

"METHOD_ALIGNMENT" is used to align the start of the method on an
appropriate boundary for the target CPU.  Use the value 1 if no
special alignment is required.  Note: this value is a hint to the
cache - it may alter the alignment value.

"method" is a value that uniquely identifies the method that is being
translated.  Usually this is the "jit_function_t" pointer.

The function initializes the "posn" structure to point to the start
and end of the space available for the method output.  The function
returns one of three result codes:

	JIT_CACHE_OK        The function call was successful.
	JIT_CACHE_RESTART   The cache does not currently have enough
	                    space to fit any method.  This code may
	                    only be returned if the "factor" value
	                    was 0.  In this case it is necessary to
	                    restart the method output process by
	                    calling _jit_cache_start_method again
	                    with a bigger "factor" value.
	JIT_CACHE_TOO_BIG   The cache does not have any space left
	                    for allocation.  In this case a restart
	                    won't help.

Some CPU optimization guides recommend that labels should be aligned.
This can be achieved using _jit_cache_align.

Once the method code has been output, call _jit_cache_end_method to
finalize the process.  This function returns one of two result codes:

	JIT_CACHE_OK        The method output process was successful.
	JIT_CACHE_RESTART   The cache space overflowed.  It is necessary
	                    to restart the method output process by
	                    calling _jit_cache_start_method again
	                    with a bigger "factor" value.

The caller should repeatedly translate the method while _jit_cache_end_method
continues to return JIT_CACHE_RESTART.  Normally there will be no
more than a single request to restart, but the caller should not rely
upon this.  The cache algorithm guarantees that the restart loop will
eventually terminate.
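In terms of the function-based API actually implemented in this file
(the documentation above appears to predate its renaming), the restart
loop looks roughly like the following sketch, where "translate" stands
for the caller's own code generator and returns a JIT_MEMORY_* code:

	int count = 0;
	int result;
	for(;;)
	{
		result = _jit_cache_start_function(cache, func);
		if(result == JIT_MEMORY_OK)
		{
			result = _jit_cache_end_function(
				cache, translate(cache, func));
		}
		if(result != JIT_MEMORY_RESTART)
		{
			break;
		}
		if(_jit_cache_extend(cache, count++) != JIT_MEMORY_OK)
		{
			break;
		}
	}

Each retry grows the cache through _jit_cache_extend, which requests a
page factor of (1 << count), so the working region at least doubles
until the function fits or JIT_MEMORY_TOO_BIG is reported.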
Cache data structure
--------------------

The cache consists of one or more "cache pages", which contain method
code and auxiliary data.  The default size for a cache page is 64k
(JIT_CACHE_PAGE_SIZE).  The size is adjusted to be a multiple of the
system page size (usually 4k), and then stored in "pageSize".

Method code is written into a cache page starting at the bottom of the
page, and growing upwards.  Auxiliary data is written into a cache page
starting at the top of the page, and growing downwards.  When the two
regions meet, a new cache page is allocated and the process restarts.

To allow methods bigger than a single cache page it is possible to
allocate a block of consecutive pages as a single unit.  The method
code and auxiliary data are written to such a multiple-page block in
the same manner as into an ordinary page.

Each method has one or more jit_cache_method auxiliary data blocks
associated with it.  These blocks indicate the start and end of regions
within the method.  Normally these regions correspond to exception "try"
blocks, or regular code between "try" blocks.

The jit_cache_method blocks are organised into a red-black tree, which
is used to perform fast lookups by address (_jit_cache_get_method).  These
lookups are used when walking the stack during exceptions or security
processing.

Each method can also have offset information associated with it, to map
between native code addresses and offsets within the original bytecode.
This is typically used to support debugging.  Offset information is stored
as auxiliary data, attached to the jit_cache_method block.

Threading issues
----------------

Writing a method to the cache, querying a method by address, and querying
offset information for a method are not thread-safe.  The caller should
arrange for a cache lock to be acquired prior to performing these
operations.

Executing methods from the cache is thread-safe, as the method code is
fixed in place once it has been written.

Note: some CPUs require that a special cache flush instruction be
performed before executing method code that has just been written.
This is especially important in SMP environments.  It is the caller's
responsibility to perform this flush operation.

We do not provide locking or CPU flush capabilities in the cache
implementation itself, because the caller may need to perform other
duties before flushing the CPU cache or releasing the lock.

The following is the recommended way to map a "jit_function_t" pointer
to a starting address for execution (see the sketch after this list):

	Look in "jit_function_t" to see if we already have a starting
	address.  If so, then bail out.

	Acquire the cache lock.

	Check again to see if we already have a starting address, just
	in case another thread got here first.  If so, then release
	the cache lock and bail out.

	Translate the method.

	Update the "jit_function_t" structure to contain the starting
	address.

	Force a CPU cache line flush.

	Release the cache lock.
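In code, that double-checked pattern looks roughly like this sketch,
where "entry_point", "translate", "cache_lock", "acquire", "release",
and "flush_icache" are all hypothetical names standing in for the
caller's own machinery; the second read of "entry_point" is the
re-check performed under the lock:

	void *start = func->entry_point;
	if(!start)
	{
		acquire(&cache_lock);
		start = func->entry_point;
		if(!start)
		{
			start = translate(func);
			func->entry_point = start;
			flush_icache(start);
		}
		release(&cache_lock);
	}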
Why aren't methods flushed when the cache fills up?
---------------------------------------------------

In this cache implementation, methods are never "flushed" when the
cache becomes full.  Instead, all translation stops.  This is not a bug.
It is a feature.

In a multi-threaded environment, it is impossible to know if some
other thread is executing the code of a method that may be a candidate
for flushing.  Impossible, that is, unless one introduces a huge number
of read-write locks, one per method, to prevent a method from being
flushed.  The read locks must be acquired on entry to a method, and
released on exit.  The write locks are acquired prior to translation.

The overhead of introducing all of these locks and the associated cache
data structures is very high.  The only safe thing to do is to assume
that once a method has been translated, its code must be fixed in place
for all time.

We've looked at the code for other Free Software and Open Source JITs,
and they all use a constantly-growing method cache.  No one has found
a solution to this problem, it seems.  Suggestions are welcome.

To prevent the cache from chewing up all of system memory, it is possible
to set a limit on how far it will grow.  Once the limit is reached, an
out-of-memory condition is reported and there is no way to recover.

*/

#ifdef __cplusplus
}
#endif