system/core
Révision | 71b34a03e4a6d7be4a246d9e3e5fa5bd61128b38 (tree) |
---|---|
l'heure | 2016-07-20 18:02:24 |
Auteur | Chih-Wei Huang <cwhuang@linu...> |
Committer | Chih-Wei Huang |
Merge branch 'android-ia' into marshmallow-x86
@@ -85,6 +85,13 @@ struct asocket { | ||
85 | 85 | ** but packets are still queued for delivery |
86 | 86 | */ |
87 | 87 | int closing; |
88 | + /* flag: set when this socket is running. | |
89 | + */ | |
90 | + int running; | |
91 | + /* flag: force close this socket. if this socket is running, another | |
92 | + ** thread sets this flag to request that it be closed. | |
93 | + */ | |
94 | + int force_close; | |
88 | 95 | |
89 | 96 | /* flag: quit adbd when both ends close the |
90 | 97 | ** local service socket |
@@ -198,7 +198,10 @@ static void help() { | ||
198 | 198 | " adb version - show version num\n" |
199 | 199 | "\n" |
200 | 200 | "scripting:\n" |
201 | - " adb wait-for-device - block until device is online\n" | |
201 | + " adb wait-for-<transport>-<state>\n" | |
202 | + " - wait for device to be in the given state:\n" | |
203 | + " device, recovery, sideload, or bootloader\n" | |
204 | + " Transport is: usb, local or any\n" | |
202 | 205 | " adb start-server - ensure that there is a server running\n" |
203 | 206 | " adb kill-server - kill the server if it is running\n" |
204 | 207 | " adb get-state - prints: offline | bootloader | device\n" |
@@ -674,19 +677,49 @@ static int ppp(int argc, const char** argv) { | ||
674 | 677 | #endif /* !defined(_WIN32) */ |
675 | 678 | } |
676 | 679 | |
680 | +static bool check_wait_for_device_syntax(const char* service) { | |
681 | + // TODO: when we have libc++ for Windows, use a regular expression instead. | |
682 | + // wait-for-((any|local|usb)-)?(bootloader|device|recovery|sideload) | |
683 | + | |
684 | + char type[20]; | |
685 | + char state[20]; | |
686 | + int length = 0; | |
687 | + if (sscanf(service, "wait-for-%20[a-z]-%20[a-z]%n", type, state, &length) < 2 || | |
688 | + length != static_cast<int>(strlen(service))) { | |
689 | + fprintf(stderr, "adb: couldn't parse 'wait-for' command: %s\n", service); | |
690 | + return false; | |
691 | + } | |
692 | + | |
693 | + if (strcmp(type, "any") != 0 && strcmp(type, "local") != 0 && strcmp(type, "usb") != 0) { | |
694 | + fprintf(stderr, "adb: unknown type %s; expected 'any', 'local', or 'usb'\n", type); | |
695 | + return false; | |
696 | + } | |
697 | + if (strcmp(state, "bootloader") != 0 && strcmp(state, "device") != 0 && | |
698 | + strcmp(state, "recovery") != 0 && strcmp(state, "sideload") != 0) { | |
699 | + fprintf(stderr, "adb: unknown state %s; " | |
700 | + "expected 'bootloader', 'device', 'recovery', or 'sideload'\n", state); | |
701 | + return false; | |
702 | + } | |
703 | + return true; | |
704 | +} | |
705 | + | |
677 | 706 | static bool wait_for_device(const char* service, transport_type t, const char* serial) { |
678 | 707 | // Was the caller vague about what they'd like us to wait for? |
679 | 708 | // If so, check they weren't more specific in their choice of transport type. |
680 | 709 | if (strcmp(service, "wait-for-device") == 0) { |
681 | 710 | if (t == kTransportUsb) { |
682 | - service = "wait-for-usb"; | |
711 | + service = "wait-for-usb-device"; | |
683 | 712 | } else if (t == kTransportLocal) { |
684 | - service = "wait-for-local"; | |
713 | + service = "wait-for-local-device"; | |
685 | 714 | } else { |
686 | - service = "wait-for-any"; | |
715 | + service = "wait-for-any-device"; | |
687 | 716 | } |
688 | 717 | } |
689 | 718 | |
719 | + if (!check_wait_for_device_syntax(service)) { | |
720 | + return false; | |
721 | + } | |
722 | + | |
690 | 723 | std::string cmd = format_host_command(service, t, serial); |
691 | 724 | std::string error; |
692 | 725 | if (adb_command(cmd, &error)) { |
@@ -44,6 +44,8 @@ | ||
44 | 44 | // of the shell's pseudo-tty master. I.e. force close it. |
45 | 45 | int SHELL_EXIT_NOTIFY_FD = -1; |
46 | 46 | |
47 | +ADB_MUTEX_DEFINE( fdevent_lock ); | |
48 | + | |
47 | 49 | static void fatal(const char *fn, const char *fmt, ...) |
48 | 50 | { |
49 | 51 | va_list ap; |
@@ -210,6 +212,7 @@ static void fdevent_process() | ||
210 | 212 | exit(1); |
211 | 213 | } |
212 | 214 | |
215 | + adb_mutex_lock(&fdevent_lock); | |
213 | 216 | for(i = 0; i < n; i++) { |
214 | 217 | struct epoll_event *ev = events + i; |
215 | 218 | fde = ev->data.ptr; |
@@ -229,6 +232,7 @@ static void fdevent_process() | ||
229 | 232 | fdevent_plist_enqueue(fde); |
230 | 233 | } |
231 | 234 | } |
235 | + adb_mutex_unlock(&fdevent_lock); | |
232 | 236 | } |
233 | 237 | |
234 | 238 | #else /* USE_SELECT */ |
@@ -364,13 +368,17 @@ static void fdevent_process() | ||
364 | 368 | unsigned events; |
365 | 369 | fd_set rfd, wfd, efd; |
366 | 370 | |
371 | + adb_mutex_lock(&fdevent_lock); | |
367 | 372 | memcpy(&rfd, &read_fds, sizeof(fd_set)); |
368 | 373 | memcpy(&wfd, &write_fds, sizeof(fd_set)); |
369 | 374 | memcpy(&efd, &error_fds, sizeof(fd_set)); |
370 | 375 | |
371 | 376 | dump_all_fds("pre select()"); |
377 | + adb_mutex_unlock(&fdevent_lock); | |
372 | 378 | |
373 | 379 | n = select(select_n, &rfd, &wfd, &efd, NULL); |
380 | + | |
381 | + adb_mutex_lock(&fdevent_lock); | |
374 | 382 | int saved_errno = errno; |
375 | 383 | D("select() returned n=%d, errno=%d\n", n, n<0?saved_errno:0); |
376 | 384 |
@@ -378,7 +386,7 @@ static void fdevent_process() | ||
378 | 386 | |
379 | 387 | if(n < 0) { |
380 | 388 | switch(saved_errno) { |
381 | - case EINTR: return; | |
389 | + case EINTR: goto unlock; | |
382 | 390 | case EBADF: |
383 | 391 | // Can't trust the FD sets after an error. |
384 | 392 | FD_ZERO(&wfd); |
@@ -387,7 +395,7 @@ static void fdevent_process() | ||
387 | 395 | break; |
388 | 396 | default: |
389 | 397 | D("Unexpected select() error=%d\n", saved_errno); |
390 | - return; | |
398 | + goto unlock; | |
391 | 399 | } |
392 | 400 | } |
393 | 401 | if(n <= 0) { |
@@ -405,7 +413,9 @@ static void fdevent_process() | ||
405 | 413 | if(events) { |
406 | 414 | fde = fd_table[i]; |
407 | 415 | if(fde == 0) |
408 | - FATAL("missing fde for fd %d\n", i); | |
416 | + // run here because the fde was just removed | |
417 | + // after returning from select. | |
418 | + continue; | |
409 | 419 | |
410 | 420 | fde->events |= events; |
411 | 421 |
@@ -416,6 +426,8 @@ static void fdevent_process() | ||
416 | 426 | fdevent_plist_enqueue(fde); |
417 | 427 | } |
418 | 428 | } |
429 | +unlock: | |
430 | + adb_mutex_unlock(&fdevent_lock); | |
419 | 431 | } |
420 | 432 | |
421 | 433 | #endif |
@@ -500,14 +512,16 @@ static fdevent *fdevent_plist_dequeue(void) | ||
500 | 512 | return node; |
501 | 513 | } |
502 | 514 | |
503 | -static void fdevent_call_fdfunc(fdevent* fde) | |
515 | +static void fdevent_call_fdfunc_locked(fdevent* fde) | |
504 | 516 | { |
505 | 517 | unsigned events = fde->events; |
506 | 518 | fde->events = 0; |
507 | 519 | if(!(fde->state & FDE_PENDING)) return; |
508 | 520 | fde->state &= (~FDE_PENDING); |
509 | 521 | dump_fde(fde, "callback"); |
522 | + adb_mutex_unlock(&fdevent_lock); | |
510 | 523 | fde->func(fde->fd, events, fde->arg); |
524 | + adb_mutex_lock(&fdevent_lock); | |
511 | 525 | } |
512 | 526 | |
513 | 527 | static void fdevent_subproc_event_func(int fd, unsigned ev, |
@@ -523,6 +537,7 @@ static void fdevent_subproc_event_func(int fd, unsigned ev, | ||
523 | 537 | fdevent *fde = fd_table[fd]; |
524 | 538 | fdevent_add(fde, FDE_READ); |
525 | 539 | |
540 | + adb_mutex_lock(&fdevent_lock); | |
526 | 541 | if(ev & FDE_READ){ |
527 | 542 | int subproc_fd; |
528 | 543 |
@@ -532,17 +547,17 @@ static void fdevent_subproc_event_func(int fd, unsigned ev, | ||
532 | 547 | if((subproc_fd < 0) || (subproc_fd >= fd_table_max)) { |
533 | 548 | D("subproc_fd %d out of range 0, fd_table_max=%d\n", |
534 | 549 | subproc_fd, fd_table_max); |
535 | - return; | |
550 | + goto unlock; | |
536 | 551 | } |
537 | 552 | fdevent *subproc_fde = fd_table[subproc_fd]; |
538 | 553 | if(!subproc_fde) { |
539 | 554 | D("subproc_fd %d cleared from fd_table\n", subproc_fd); |
540 | - return; | |
555 | + goto unlock; | |
541 | 556 | } |
542 | 557 | if(subproc_fde->fd != subproc_fd) { |
543 | 558 | // Already reallocated? |
544 | 559 | D("subproc_fd %d != fd_table[].fd %d\n", subproc_fd, subproc_fde->fd); |
545 | - return; | |
560 | + goto unlock; | |
546 | 561 | } |
547 | 562 | |
548 | 563 | subproc_fde->force_eof = 1; |
@@ -556,17 +571,19 @@ static void fdevent_subproc_event_func(int fd, unsigned ev, | ||
556 | 571 | // If there is data left, it will show up in the select(). |
557 | 572 | // This works because there is no other thread reading that |
558 | 573 | // data when in this fd_func(). |
559 | - return; | |
574 | + goto unlock; | |
560 | 575 | } |
561 | 576 | |
562 | 577 | D("subproc_fde.state=%04x\n", subproc_fde->state); |
563 | 578 | subproc_fde->events |= FDE_READ; |
564 | 579 | if(subproc_fde->state & FDE_PENDING) { |
565 | - return; | |
580 | + goto unlock; | |
566 | 581 | } |
567 | 582 | subproc_fde->state |= FDE_PENDING; |
568 | - fdevent_call_fdfunc(subproc_fde); | |
583 | + fdevent_call_fdfunc_locked(subproc_fde); | |
569 | 584 | } |
585 | +unlock: | |
586 | + adb_mutex_unlock(&fdevent_lock); | |
570 | 587 | } |
571 | 588 | |
572 | 589 | fdevent *fdevent_create(int fd, fd_func func, void *arg) |
@@ -590,6 +607,7 @@ void fdevent_destroy(fdevent *fde) | ||
590 | 607 | |
591 | 608 | void fdevent_install(fdevent *fde, int fd, fd_func func, void *arg) |
592 | 609 | { |
610 | + adb_mutex_lock(&fdevent_lock); | |
593 | 611 | memset(fde, 0, sizeof(fdevent)); |
594 | 612 | fde->state = FDE_ACTIVE; |
595 | 613 | fde->fd = fd; |
@@ -604,10 +622,12 @@ void fdevent_install(fdevent *fde, int fd, fd_func func, void *arg) | ||
604 | 622 | dump_fde(fde, "connect"); |
605 | 623 | fdevent_connect(fde); |
606 | 624 | fde->state |= FDE_ACTIVE; |
625 | + adb_mutex_unlock(&fdevent_lock); | |
607 | 626 | } |
608 | 627 | |
609 | 628 | void fdevent_remove(fdevent *fde) |
610 | 629 | { |
630 | + adb_mutex_lock(&fdevent_lock); | |
611 | 631 | if(fde->state & FDE_PENDING) { |
612 | 632 | fdevent_plist_remove(fde); |
613 | 633 | } |
@@ -620,6 +640,7 @@ void fdevent_remove(fdevent *fde) | ||
620 | 640 | |
621 | 641 | fde->state = 0; |
622 | 642 | fde->events = 0; |
643 | + adb_mutex_unlock(&fdevent_lock); | |
623 | 644 | } |
624 | 645 | |
625 | 646 |
@@ -627,7 +648,11 @@ void fdevent_set(fdevent *fde, unsigned events) | ||
627 | 648 | { |
628 | 649 | events &= FDE_EVENTMASK; |
629 | 650 | |
630 | - if((fde->state & FDE_EVENTMASK) == events) return; | |
651 | + adb_mutex_lock(&fdevent_lock); | |
652 | + if((fde->state & FDE_EVENTMASK) == events) { | |
653 | + adb_mutex_unlock(&fdevent_lock); | |
654 | + return; | |
655 | + } | |
631 | 656 | |
632 | 657 | if(fde->state & FDE_ACTIVE) { |
633 | 658 | fdevent_update(fde, events); |
@@ -647,6 +672,7 @@ void fdevent_set(fdevent *fde, unsigned events) | ||
647 | 672 | fde->state &= (~FDE_PENDING); |
648 | 673 | } |
649 | 674 | } |
675 | + adb_mutex_unlock(&fdevent_lock); | |
650 | 676 | } |
651 | 677 | |
652 | 678 | void fdevent_add(fdevent *fde, unsigned events) |
@@ -688,8 +714,10 @@ void fdevent_loop() | ||
688 | 714 | |
689 | 715 | fdevent_process(); |
690 | 716 | |
717 | + adb_mutex_lock(&fdevent_lock); | |
691 | 718 | while((fde = fdevent_plist_dequeue())) { |
692 | - fdevent_call_fdfunc(fde); | |
719 | + fdevent_call_fdfunc_locked(fde); | |
693 | 720 | } |
721 | + adb_mutex_unlock(&fdevent_lock); | |
694 | 722 | } |
695 | 723 | } |
@@ -8,6 +8,7 @@ | ||
8 | 8 | #endif |
9 | 9 | ADB_MUTEX(socket_list_lock) |
10 | 10 | ADB_MUTEX(transport_lock) |
11 | +ADB_MUTEX(fdevent_lock) | |
11 | 12 | #if ADB_HOST |
12 | 13 | ADB_MUTEX(local_transports_lock) |
13 | 14 | #endif |
@@ -517,27 +517,55 @@ struct state_info { | ||
517 | 517 | transport_type transport; |
518 | 518 | char* serial; |
519 | 519 | int state; |
520 | + fdevent fde; | |
521 | + bool abort; | |
520 | 522 | }; |
521 | 523 | |
524 | +void wait_for_state_fd_func(int fd, unsigned ev, void *userdata) | |
525 | +{ | |
526 | + state_info* sinfo = reinterpret_cast<state_info*>(userdata); | |
527 | + | |
528 | + if (ev & FDE_WRITE) { | |
529 | + /* don't care this event */ | |
530 | + fdevent_del(&sinfo->fde, FDE_WRITE); | |
531 | + } | |
532 | + | |
533 | + if (ev & (FDE_READ | FDE_ERROR)) { | |
534 | + D("client exited, stop waiting\n"); | |
535 | + sinfo->abort = true; | |
536 | + fdevent_del(&sinfo->fde, FDE_READ | FDE_ERROR); | |
537 | + } | |
538 | +} | |
539 | + | |
522 | 540 | static void wait_for_state(int fd, void* cookie) |
523 | 541 | { |
524 | 542 | state_info* sinfo = reinterpret_cast<state_info*>(cookie); |
543 | + atransport* t = NULL; | |
525 | 544 | |
526 | 545 | D("wait_for_state %d\n", sinfo->state); |
546 | + sinfo->abort = false; | |
547 | + fdevent_install(&sinfo->fde, fd, wait_for_state_fd_func, cookie); | |
548 | + fdevent_add(&sinfo->fde, FDE_READ | FDE_ERROR); | |
527 | 549 | |
528 | 550 | std::string error_msg = "unknown error"; |
529 | - atransport* t = acquire_one_transport(sinfo->state, sinfo->transport, sinfo->serial, &error_msg); | |
551 | + while (!sinfo->abort) { | |
552 | + t = acquire_one_transport(sinfo->state, sinfo->transport, sinfo->serial, &error_msg); | |
553 | + if (t || (sinfo->state == CS_ANY)) | |
554 | + break; | |
555 | + adb_sleep_ms(1000); | |
556 | + } | |
530 | 557 | if (t != 0) { |
531 | 558 | SendOkay(fd); |
532 | 559 | } else { |
533 | 560 | SendFail(fd, error_msg); |
534 | 561 | } |
535 | 562 | |
563 | + fdevent_remove(&sinfo->fde); | |
564 | + | |
565 | + D("wait_for_state %s\n", sinfo->abort ? "aborted" : "done"); | |
536 | 566 | if (sinfo->serial) |
537 | 567 | free(sinfo->serial); |
538 | 568 | free(sinfo); |
539 | - adb_close(fd); | |
540 | - D("wait_for_state is done\n"); | |
541 | 569 | } |
542 | 570 | |
543 | 571 | static void connect_device(const std::string& host, std::string* response) { |
@@ -649,11 +677,13 @@ asocket* host_service_to_socket(const char* name, const char *serial) | ||
649 | 677 | { |
650 | 678 | if (!strcmp(name,"track-devices")) { |
651 | 679 | return create_device_tracker(); |
652 | - } else if (!strncmp(name, "wait-for-", strlen("wait-for-"))) { | |
653 | - auto sinfo = reinterpret_cast<state_info*>(malloc(sizeof(state_info))); | |
680 | + } else if (android::base::StartsWith(name, "wait-for-")) { | |
681 | + name += strlen("wait-for-"); | |
682 | + | |
683 | + std::unique_ptr<state_info> sinfo(new state_info); | |
654 | 684 | if (sinfo == nullptr) { |
655 | 685 | fprintf(stderr, "couldn't allocate state_info: %s", strerror(errno)); |
656 | - return NULL; | |
686 | + return nullptr; | |
657 | 687 | } |
658 | 688 | |
659 | 689 | if (serial) |
@@ -661,29 +691,38 @@ asocket* host_service_to_socket(const char* name, const char *serial) | ||
661 | 691 | else |
662 | 692 | sinfo->serial = NULL; |
663 | 693 | |
664 | - name += strlen("wait-for-"); | |
665 | - | |
666 | - if (!strncmp(name, "local", strlen("local"))) { | |
694 | + if (android::base::StartsWith(name, "local")) { | |
695 | + name += strlen("local"); | |
667 | 696 | sinfo->transport = kTransportLocal; |
668 | - sinfo->state = CS_DEVICE; | |
669 | - } else if (!strncmp(name, "usb", strlen("usb"))) { | |
697 | + } else if (android::base::StartsWith(name, "usb")) { | |
698 | + name += strlen("usb"); | |
670 | 699 | sinfo->transport = kTransportUsb; |
671 | - sinfo->state = CS_DEVICE; | |
672 | - } else if (!strncmp(name, "any", strlen("any"))) { | |
700 | + } else if (android::base::StartsWith(name, "any")) { | |
701 | + name += strlen("any"); | |
673 | 702 | sinfo->transport = kTransportAny; |
703 | + } else { | |
704 | + return nullptr; | |
705 | + } | |
706 | + | |
707 | + if (!strcmp(name, "-device")) { | |
674 | 708 | sinfo->state = CS_DEVICE; |
709 | + } else if (!strcmp(name, "-recovery")) { | |
710 | + sinfo->state = CS_RECOVERY; | |
711 | + } else if (!strcmp(name, "-sideload")) { | |
712 | + sinfo->state = CS_SIDELOAD; | |
713 | + } else if (!strcmp(name, "-bootloader")) { | |
714 | + sinfo->state = CS_BOOTLOADER; | |
675 | 715 | } else { |
676 | - free(sinfo); | |
677 | - return NULL; | |
716 | + return nullptr; | |
678 | 717 | } |
679 | 718 | |
680 | - int fd = create_service_thread(wait_for_state, sinfo); | |
719 | + int fd = create_service_thread(wait_for_state, sinfo.release()); | |
681 | 720 | return create_local_socket(fd); |
682 | 721 | } else if (!strncmp(name, "connect:", 8)) { |
683 | 722 | const char *host = name + 8; |
684 | 723 | int fd = create_service_thread(connect_service, (void *)host); |
685 | 724 | return create_local_socket(fd); |
686 | 725 | } |
687 | - return NULL; | |
726 | + return nullptr; | |
688 | 727 | } |
689 | 728 | #endif /* ADB_HOST */ |
@@ -53,16 +53,13 @@ static asocket local_socket_closing_list = { | ||
53 | 53 | .prev = &local_socket_closing_list, |
54 | 54 | }; |
55 | 55 | |
56 | -// Parse the global list of sockets to find one with id |local_id|. | |
57 | -// If |peer_id| is not 0, also check that it is connected to a peer | |
58 | -// with id |peer_id|. Returns an asocket handle on success, NULL on failure. | |
59 | -asocket *find_local_socket(unsigned local_id, unsigned peer_id) | |
56 | +static asocket * | |
57 | +find_socket_in_list(asocket *list, unsigned local_id, unsigned peer_id) | |
60 | 58 | { |
61 | 59 | asocket *s; |
62 | 60 | asocket *result = NULL; |
63 | 61 | |
64 | - adb_mutex_lock(&socket_list_lock); | |
65 | - for (s = local_socket_list.next; s != &local_socket_list; s = s->next) { | |
62 | + for (s = list->next; s != list; s = s->next) { | |
66 | 63 | if (s->id != local_id) |
67 | 64 | continue; |
68 | 65 | if (peer_id == 0 || (s->peer && s->peer->id == peer_id)) { |
@@ -70,11 +67,24 @@ asocket *find_local_socket(unsigned local_id, unsigned peer_id) | ||
70 | 67 | } |
71 | 68 | break; |
72 | 69 | } |
73 | - adb_mutex_unlock(&socket_list_lock); | |
74 | 70 | |
75 | 71 | return result; |
76 | 72 | } |
77 | 73 | |
74 | + | |
75 | +// Parse the global list of sockets to find one with id |local_id|. | |
76 | +// If |peer_id| is not 0, also check that it is connected to a peer | |
77 | +// with id |peer_id|. Returns an asocket handle on success, NULL on failure. | |
78 | +asocket *find_local_socket(unsigned local_id, unsigned peer_id) | |
79 | +{ | |
80 | + asocket *result = NULL; | |
81 | + | |
82 | + adb_mutex_lock(&socket_list_lock); | |
83 | + result = find_socket_in_list(&local_socket_list, local_id, peer_id); | |
84 | + adb_mutex_unlock(&socket_list_lock); | |
85 | + return result; | |
86 | +} | |
87 | + | |
78 | 88 | static void |
79 | 89 | insert_local_socket(asocket* s, asocket* list) |
80 | 90 | { |
@@ -113,10 +123,14 @@ void remove_socket(asocket *s) | ||
113 | 123 | } |
114 | 124 | } |
115 | 125 | |
126 | +// Note: after return, all sockets referring to transport @t should be closed. | |
127 | +// (Because the atransport is going to be removed.) | |
128 | +// The force_close and running flags are used to implement this. | |
116 | 129 | void close_all_sockets(atransport *t) |
117 | 130 | { |
118 | 131 | asocket *s; |
119 | 132 | |
133 | + D("close all sockets of transport %p\n", t); | |
120 | 134 | /* this is a little gross, but since s->close() *will* modify |
121 | 135 | ** the list out from under you, your options are limited. |
122 | 136 | */ |
@@ -124,7 +138,17 @@ void close_all_sockets(atransport *t) | ||
124 | 138 | restart: |
125 | 139 | for(s = local_socket_list.next; s != &local_socket_list; s = s->next){ |
126 | 140 | if(s->transport == t || (s->peer && s->peer->transport == t)) { |
127 | - local_socket_close_locked(s); | |
141 | + // set force_close flag since transport is going to be removed. | |
142 | + // we need to ensure the socket is closed after we return. | |
143 | + s->force_close = 1; | |
144 | + // avoid race condition with pending fdevent | |
145 | + if (s->running) { | |
146 | + // unlock to give a chance to close socket after running | |
147 | + adb_mutex_unlock(&socket_list_lock); | |
148 | + adb_sleep_ms(10); // sleep to relax cpu | |
149 | + adb_mutex_lock(&socket_list_lock); | |
150 | + } else | |
151 | + local_socket_close_locked(s); | |
128 | 152 | goto restart; |
129 | 153 | } |
130 | 154 | } |
@@ -193,8 +217,18 @@ static void local_socket_ready(asocket *s) | ||
193 | 217 | |
194 | 218 | static void local_socket_close(asocket *s) |
195 | 219 | { |
220 | + unsigned local_id = s->id; | |
221 | + unsigned peer_id = s->peer ? s->peer->id : 0; | |
222 | + asocket *sk; | |
223 | + | |
196 | 224 | adb_mutex_lock(&socket_list_lock); |
197 | - local_socket_close_locked(s); | |
225 | + // we may race with close_all_sockets (called by input-thread), | |
226 | + // so we need to check if the socket was already destroyed. | |
227 | + sk = find_socket_in_list(&local_socket_list, local_id, peer_id); | |
228 | + if (!sk) | |
229 | + sk = find_socket_in_list(&local_socket_closing_list, local_id, peer_id); | |
230 | + if (sk) | |
231 | + local_socket_close_locked(s); | |
198 | 232 | adb_mutex_unlock(&socket_list_lock); |
199 | 233 | } |
200 | 234 |
@@ -250,9 +284,10 @@ static void local_socket_close_locked(asocket *s) | ||
250 | 284 | } |
251 | 285 | |
252 | 286 | /* If we are already closing, or if there are no |
253 | - ** pending packets, destroy immediately | |
287 | + ** pending packets, or need force close it, then | |
288 | + ** destroy immediately. | |
254 | 289 | */ |
255 | - if (s->closing || s->pkt_first == NULL) { | |
290 | + if (s->closing || s->force_close || s->pkt_first == NULL) { | |
256 | 291 | int id = s->id; |
257 | 292 | local_socket_destroy(s); |
258 | 293 | D("LS(%d): closed\n", id); |
@@ -272,8 +307,12 @@ static void local_socket_close_locked(asocket *s) | ||
272 | 307 | static void local_socket_event_func(int fd, unsigned ev, void* _s) |
273 | 308 | { |
274 | 309 | asocket* s = reinterpret_cast<asocket*>(_s); |
310 | + s->running = 1; | |
275 | 311 | D("LS(%d): event_func(fd=%d(==%d), ev=%04x)\n", s->id, s->fd, fd, ev); |
276 | 312 | |
313 | + if (s->force_close) | |
314 | + goto out; | |
315 | + | |
277 | 316 | /* put the FDE_WRITE processing before the FDE_READ |
278 | 317 | ** in order to simplify the code. |
279 | 318 | */ |
@@ -287,7 +326,7 @@ static void local_socket_event_func(int fd, unsigned ev, void* _s) | ||
287 | 326 | ** be processed in the next iteration loop |
288 | 327 | */ |
289 | 328 | if (errno == EAGAIN) { |
290 | - return; | |
329 | + goto out; | |
291 | 330 | } |
292 | 331 | } else if (r > 0) { |
293 | 332 | p->ptr += r; |
@@ -296,6 +335,7 @@ static void local_socket_event_func(int fd, unsigned ev, void* _s) | ||
296 | 335 | } |
297 | 336 | |
298 | 337 | D(" closing after write because r=%d and errno is %d\n", r, errno); |
338 | + s->running = 0; | |
299 | 339 | s->close(s); |
300 | 340 | return; |
301 | 341 | } |
@@ -314,6 +354,7 @@ static void local_socket_event_func(int fd, unsigned ev, void* _s) | ||
314 | 354 | */ |
315 | 355 | if (s->closing) { |
316 | 356 | D(" closing because 'closing' is set after write\n"); |
357 | + s->running = 0; | |
317 | 358 | s->close(s); |
318 | 359 | return; |
319 | 360 | } |
@@ -372,7 +413,7 @@ static void local_socket_event_func(int fd, unsigned ev, void* _s) | ||
372 | 413 | ** this handler function will be called again |
373 | 414 | ** to process FDE_WRITE events. |
374 | 415 | */ |
375 | - return; | |
416 | + goto out; | |
376 | 417 | } |
377 | 418 | |
378 | 419 | if (r > 0) { |
@@ -387,7 +428,9 @@ static void local_socket_event_func(int fd, unsigned ev, void* _s) | ||
387 | 428 | if ((s->fde.force_eof && !r) || is_eof) { |
388 | 429 | D(" closing because is_eof=%d r=%d s->fde.force_eof=%d\n", |
389 | 430 | is_eof, r, s->fde.force_eof); |
431 | + s->running = 0; | |
390 | 432 | s->close(s); |
433 | + return; | |
391 | 434 | } |
392 | 435 | } |
393 | 436 |
@@ -398,7 +441,13 @@ static void local_socket_event_func(int fd, unsigned ev, void* _s) | ||
398 | 441 | */ |
399 | 442 | D("LS(%d): FDE_ERROR (fd=%d)\n", s->id, s->fd); |
400 | 443 | |
401 | - return; | |
444 | + goto out; | |
445 | + } | |
446 | +out: | |
447 | + s->running = 0; | |
448 | + if (s->force_close) { | |
449 | + D("LS(%d): force closing (fd=%d)\n", s->id, s->fd); | |
450 | + s->close(s); | |
402 | 451 | } |
403 | 452 | } |
404 | 453 |
@@ -406,6 +455,7 @@ asocket *create_local_socket(int fd) | ||
406 | 455 | { |
407 | 456 | asocket *s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket))); |
408 | 457 | if (s == NULL) fatal("cannot allocate socket"); |
458 | + memset(s, 0, sizeof(asocket)); | |
409 | 459 | s->fd = fd; |
410 | 460 | s->enqueue = local_socket_enqueue; |
411 | 461 | s->ready = local_socket_ready; |
@@ -551,6 +601,7 @@ asocket *create_remote_socket(unsigned id, atransport *t) | ||
551 | 601 | adisconnect* dis = &reinterpret_cast<aremotesocket*>(s)->disconnect; |
552 | 602 | |
553 | 603 | if (s == NULL) fatal("cannot allocate socket"); |
604 | + memset(s, 0, sizeof(asocket)); | |
554 | 605 | s->id = id; |
555 | 606 | s->enqueue = remote_socket_enqueue; |
556 | 607 | s->ready = remote_socket_ready; |
@@ -883,6 +934,7 @@ static asocket *create_smart_socket(void) | ||
883 | 934 | D("Creating smart socket \n"); |
884 | 935 | asocket *s = reinterpret_cast<asocket*>(calloc(1, sizeof(asocket))); |
885 | 936 | if (s == NULL) fatal("cannot allocate socket"); |
937 | + memset(s, 0, sizeof(asocket)); | |
886 | 938 | s->enqueue = smart_socket_enqueue; |
887 | 939 | s->ready = smart_socket_ready; |
888 | 940 | s->shutdown = NULL; |
@@ -27,6 +27,9 @@ | ||
27 | 27 | |
28 | 28 | #include "adb.h" |
29 | 29 | |
30 | +/* TODO: implement fdevent synchronization for Windows platform. */ | |
31 | +ADB_MUTEX_DEFINE( fdevent_lock ); | |
32 | + | |
30 | 33 | extern void fatal(const char *fmt, ...); |
31 | 34 | |
32 | 35 | /* forward declarations */ |
@@ -747,7 +747,6 @@ atransport* acquire_one_transport(int state, transport_type ttype, | ||
747 | 747 | atransport *result = NULL; |
748 | 748 | int ambiguous = 0; |
749 | 749 | |
750 | -retry: | |
751 | 750 | if (error_out) *error_out = android::base::StringPrintf("device '%s' not found", serial); |
752 | 751 | |
753 | 752 | adb_mutex_lock(&transport_lock); |
@@ -831,9 +830,6 @@ retry: | ||
831 | 830 | if (result) { |
832 | 831 | /* found one that we can take */ |
833 | 832 | if (error_out) *error_out = "success"; |
834 | - } else if (state != CS_ANY && (serial || !ambiguous)) { | |
835 | - adb_sleep_ms(1000); | |
836 | - goto retry; | |
837 | 833 | } |
838 | 834 | |
839 | 835 | return result; |
@@ -25,31 +25,51 @@ | ||
25 | 25 | |
26 | 26 | #include "adb.h" |
27 | 27 | |
28 | +#if !ADB_HOST | |
29 | +#include <syslog.h> | |
30 | +#endif | |
31 | + | |
32 | +#define MAX_CONSECUTIVE_USB_ISSUES 3 | |
33 | + | |
28 | 34 | static int remote_read(apacket *p, atransport *t) |
29 | 35 | { |
36 | + static int consecutives_errors_count = 0; | |
30 | 37 | if(usb_read(t->usb, &p->msg, sizeof(amessage))){ |
31 | 38 | D("remote usb: read terminated (message)\n"); |
32 | - return -1; | |
39 | + goto err; | |
33 | 40 | } |
34 | 41 | |
35 | 42 | if(check_header(p)) { |
36 | 43 | D("remote usb: check_header failed\n"); |
37 | - return -1; | |
44 | + goto err; | |
38 | 45 | } |
39 | 46 | |
40 | 47 | if(p->msg.data_length) { |
41 | 48 | if(usb_read(t->usb, p->data, p->msg.data_length)){ |
42 | 49 | D("remote usb: terminated (data)\n"); |
43 | - return -1; | |
50 | + goto err; | |
44 | 51 | } |
45 | 52 | } |
46 | 53 | |
47 | 54 | if(check_data(p)) { |
48 | 55 | D("remote usb: check_data failed\n"); |
49 | - return -1; | |
56 | + goto err; | |
50 | 57 | } |
51 | 58 | |
59 | + consecutives_errors_count = 0; | |
52 | 60 | return 0; |
61 | +err: | |
62 | + if (++consecutives_errors_count > MAX_CONSECUTIVE_USB_ISSUES) { | |
63 | +#if !ADB_HOST | |
64 | + /* Make sure we log the exit on the target */ | |
65 | + syslog(LOG_CRIT, "%s:%d - remote usb: too many consecutives usb errors(%d), exits the process\n", __FILE__, __LINE__, consecutives_errors_count); | |
66 | +#else | |
67 | + fatal("%s:%d - remote usb: too many consecutives usb errors(%d), exits the process\n", __FILE__, __LINE__, consecutives_errors_count); | |
68 | +#endif | |
69 | + /* no need to generate a coredump or a crash here; the problem is functional */ | |
70 | + exit(-1); | |
71 | + } | |
72 | + return -1; | |
53 | 73 | } |
54 | 74 | |
55 | 75 | static int remote_write(apacket *p, atransport *t) |
@@ -577,6 +577,7 @@ int fs_mgr_mount_all(struct fstab *fstab) | ||
577 | 577 | |
578 | 578 | /* mount(2) returned an error, handle the encryptable/formattable case */ |
579 | 579 | bool wiped = partition_wiped(fstab->recs[top_idx].blk_device); |
580 | + bool crypt_footer = false; | |
580 | 581 | if (mret && mount_errno != EBUSY && mount_errno != EACCES && |
581 | 582 | fs_mgr_is_formattable(&fstab->recs[top_idx]) && wiped) { |
582 | 583 | /* top_idx and attempted_idx point at the same partition, but sometimes |
@@ -597,8 +598,11 @@ int fs_mgr_mount_all(struct fstab *fstab) | ||
597 | 598 | ERROR("%s(): %s wouldn't open (%s)\n", __func__, |
598 | 599 | fstab->recs[top_idx].key_loc, strerror(errno)); |
599 | 600 | } |
601 | + } else if (fs_mgr_is_encryptable(&fstab->recs[top_idx]) && | |
602 | + !strcmp(fstab->recs[top_idx].key_loc, KEY_IN_FOOTER)) { | |
603 | + crypt_footer = true; | |
600 | 604 | } |
601 | - if (fs_mgr_do_format(&fstab->recs[top_idx]) == 0) { | |
605 | + if (fs_mgr_do_format(&fstab->recs[top_idx], crypt_footer) == 0) { | |
602 | 606 | /* Let's replay the mount actions. */ |
603 | 607 | i = top_idx - 1; |
604 | 608 | continue; |
@@ -27,11 +27,12 @@ | ||
27 | 27 | #include "ext4.h" |
28 | 28 | #include "make_ext4fs.h" |
29 | 29 | #include "fs_mgr_priv.h" |
30 | +#include "cryptfs.h" | |
30 | 31 | |
31 | 32 | extern struct fs_info info; /* magic global from ext4_utils */ |
32 | 33 | extern void reset_ext4fs_info(); |
33 | 34 | |
34 | -static int format_ext4(char *fs_blkdev, char *fs_mnt_point) | |
35 | +static int format_ext4(char *fs_blkdev, char *fs_mnt_point, bool crypt_footer) | |
35 | 36 | { |
36 | 37 | unsigned int nr_sec; |
37 | 38 | int fd, rc = 0; |
@@ -50,6 +51,9 @@ static int format_ext4(char *fs_blkdev, char *fs_mnt_point) | ||
50 | 51 | /* Format the partition using the calculated length */ |
51 | 52 | reset_ext4fs_info(); |
52 | 53 | info.len = ((off64_t)nr_sec * 512); |
54 | + if (crypt_footer) { | |
55 | + info.len -= CRYPT_FOOTER_OFFSET; | |
56 | + } | |
53 | 57 | |
54 | 58 | /* Use make_ext4fs_internal to avoid wiping an already-wiped partition. */ |
55 | 59 | rc = make_ext4fs_internal(fd, NULL, NULL, fs_mnt_point, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL); |
@@ -101,7 +105,7 @@ static int format_f2fs(char *fs_blkdev) | ||
101 | 105 | return rc; |
102 | 106 | } |
103 | 107 | |
104 | -int fs_mgr_do_format(struct fstab_rec *fstab) | |
108 | +int fs_mgr_do_format(struct fstab_rec *fstab, bool crypt_footer) | |
105 | 109 | { |
106 | 110 | int rc = -EINVAL; |
107 | 111 |
@@ -110,7 +114,7 @@ int fs_mgr_do_format(struct fstab_rec *fstab) | ||
110 | 114 | if (!strncmp(fstab->fs_type, "f2fs", 4)) { |
111 | 115 | rc = format_f2fs(fstab->blk_device); |
112 | 116 | } else if (!strncmp(fstab->fs_type, "ext4", 4)) { |
113 | - rc = format_ext4(fstab->blk_device, fstab->mount_point); | |
117 | + rc = format_ext4(fstab->blk_device, fstab->mount_point, crypt_footer); | |
114 | 118 | } else { |
115 | 119 | ERROR("File system type '%s' is not supported\n", fstab->fs_type); |
116 | 120 | } |
@@ -18,6 +18,7 @@ | ||
18 | 18 | #define __CORE_FS_MGR_H |
19 | 19 | |
20 | 20 | #include <stdint.h> |
21 | +#include <stdbool.h> | |
21 | 22 | #include <linux/dm-ioctl.h> |
22 | 23 | |
23 | 24 | // Magic number at start of verity metadata |
@@ -107,7 +108,7 @@ int fs_mgr_is_notrim(struct fstab_rec *fstab); | ||
107 | 108 | int fs_mgr_is_formattable(struct fstab_rec *fstab); |
108 | 109 | int fs_mgr_swapon_all(struct fstab *fstab); |
109 | 110 | |
110 | -int fs_mgr_do_format(struct fstab_rec *fstab); | |
111 | +int fs_mgr_do_format(struct fstab_rec *fstab, bool reserve_footer); | |
111 | 112 | |
112 | 113 | #ifdef __cplusplus |
113 | 114 | } |
@@ -137,6 +137,7 @@ BatteryMonitor::PowerSupplyType BatteryMonitor::readPowerSupplyType(const String | ||
137 | 137 | { "USB_DCP", ANDROID_POWER_SUPPLY_TYPE_AC }, |
138 | 138 | { "USB_CDP", ANDROID_POWER_SUPPLY_TYPE_AC }, |
139 | 139 | { "USB_ACA", ANDROID_POWER_SUPPLY_TYPE_AC }, |
140 | + { "USB_TYPEC", ANDROID_POWER_SUPPLY_TYPE_AC }, | |
140 | 141 | { "Wireless", ANDROID_POWER_SUPPLY_TYPE_WIRELESS }, |
141 | 142 | { NULL, 0 }, |
142 | 143 | }; |
@@ -570,9 +570,10 @@ static void handle_power_supply_state(struct charger *charger, int64_t now) | ||
570 | 570 | /* Last cycle would have stopped at the extreme top of battery-icon |
571 | 571 | * Need to show the correct level corresponding to capacity. |
572 | 572 | */ |
573 | - kick_animation(charger->batt_anim); | |
574 | 573 | request_suspend(false); |
575 | 574 | if (charger->next_pwr_check == -1) { |
575 | + /* only kick animation once for charger unplug case. */ | |
576 | + kick_animation(charger->batt_anim); | |
576 | 577 | charger->next_pwr_check = now + UNPLUGGED_SHUTDOWN_TIME; |
577 | 578 | LOGW("[%" PRId64 "] device unplugged: shutting down in %" PRId64 " (@ %" PRId64 ")\n", |
578 | 579 | now, (int64_t)UNPLUGGED_SHUTDOWN_TIME, charger->next_pwr_check); |
@@ -165,6 +165,10 @@ int32_t StartIteration(ZipArchiveHandle handle, void** cookie_ptr, | ||
165 | 165 | */ |
166 | 166 | int32_t Next(void* cookie, ZipEntry* data, ZipEntryName *name); |
167 | 167 | |
168 | +#ifdef ZIP_NO_INTEGRITY | |
169 | +int32_t NextNoIntegrity(void* cookie, ZipEntry* data, ZipEntryName *name); | |
170 | +#endif | |
171 | + | |
168 | 172 | /* |
169 | 173 | * End iteration over all entries of a zip file and frees the memory allocated |
170 | 174 | * in StartIteration. |
@@ -943,7 +943,16 @@ int selinux_reload_policy(void) | ||
943 | 943 | } |
944 | 944 | |
945 | 945 | static int audit_callback(void *data, security_class_t /*cls*/, char *buf, size_t len) { |
946 | - snprintf(buf, len, "property=%s", !data ? "NULL" : (char *)data); | |
946 | + | |
947 | + property_audit_data *d = reinterpret_cast<property_audit_data*>(data); | |
948 | + | |
949 | + if (!d || !d->name || !d->cr) { | |
950 | + ERROR("audit_callback invoked with null data arguments!"); | |
951 | + return 0; | |
952 | + } | |
953 | + | |
954 | + snprintf(buf, len, "property=%s pid=%d uid=%d gid=%d", d->name, | |
955 | + d->cr->pid, d->cr->uid, d->cr->gid); | |
947 | 956 | return 0; |
948 | 957 | } |
949 | 958 |
@@ -90,13 +90,14 @@ void property_init() { | ||
90 | 90 | } |
91 | 91 | } |
92 | 92 | |
93 | -static int check_mac_perms(const char *name, char *sctx) | |
93 | +static int check_mac_perms(const char *name, char *sctx, struct ucred *cr) | |
94 | 94 | { |
95 | 95 | if (is_selinux_enabled() <= 0) |
96 | 96 | return 1; |
97 | 97 | |
98 | 98 | char *tctx = NULL; |
99 | 99 | int result = 0; |
100 | + property_audit_data audit_data; | |
100 | 101 | |
101 | 102 | if (!sctx) |
102 | 103 | goto err; |
@@ -107,7 +108,10 @@ static int check_mac_perms(const char *name, char *sctx) | ||
107 | 108 | if (selabel_lookup(sehandle_prop, &tctx, name, 1) != 0) |
108 | 109 | goto err; |
109 | 110 | |
110 | - if (selinux_check_access(sctx, tctx, "property_service", "set", (void*) name) == 0) | |
111 | + audit_data.name = name; | |
112 | + audit_data.cr = cr; | |
113 | + | |
114 | + if (selinux_check_access(sctx, tctx, "property_service", "set", reinterpret_cast<void*>(&audit_data)) == 0) | |
111 | 115 | result = 1; |
112 | 116 | |
113 | 117 | freecon(tctx); |
@@ -115,7 +119,7 @@ static int check_mac_perms(const char *name, char *sctx) | ||
115 | 119 | return result; |
116 | 120 | } |
117 | 121 | |
118 | -static int check_control_mac_perms(const char *name, char *sctx) | |
122 | +static int check_control_mac_perms(const char *name, char *sctx, struct ucred *cr) | |
119 | 123 | { |
120 | 124 | /* |
121 | 125 | * Create a name prefix out of ctl.<service name> |
@@ -129,19 +133,19 @@ static int check_control_mac_perms(const char *name, char *sctx) | ||
129 | 133 | if (ret < 0 || (size_t) ret >= sizeof(ctl_name)) |
130 | 134 | return 0; |
131 | 135 | |
132 | - return check_mac_perms(ctl_name, sctx); | |
136 | + return check_mac_perms(ctl_name, sctx, cr); | |
133 | 137 | } |
134 | 138 | |
135 | 139 | /* |
136 | 140 | * Checks permissions for setting system properties. |
137 | 141 | * Returns 1 if uid allowed, 0 otherwise. |
138 | 142 | */ |
139 | -static int check_perms(const char *name, char *sctx) | |
143 | +static int check_perms(const char *name, char *sctx, struct ucred *cr) | |
140 | 144 | { |
141 | 145 | if(!strncmp(name, "ro.", 3)) |
142 | 146 | name +=3; |
143 | 147 | |
144 | - return check_mac_perms(name, sctx); | |
148 | + return check_mac_perms(name, sctx, cr); | |
145 | 149 | } |
146 | 150 | |
147 | 151 | int __property_get(const char *name, char *value) |
@@ -323,14 +327,14 @@ static void handle_property_set_fd() | ||
323 | 327 | // Keep the old close-socket-early behavior when handling |
324 | 328 | // ctl.* properties. |
325 | 329 | close(s); |
326 | - if (check_control_mac_perms(msg.value, source_ctx)) { | |
330 | + if (check_control_mac_perms(msg.value, source_ctx, &cr)) { | |
327 | 331 | handle_control_message((char*) msg.name + 4, (char*) msg.value); |
328 | 332 | } else { |
329 | 333 | ERROR("sys_prop: Unable to %s service ctl [%s] uid:%d gid:%d pid:%d\n", |
330 | 334 | msg.name + 4, msg.value, cr.uid, cr.gid, cr.pid); |
331 | 335 | } |
332 | 336 | } else { |
333 | - if (check_perms(msg.name, source_ctx)) { | |
337 | + if (check_perms(msg.name, source_ctx, &cr)) { | |
334 | 338 | property_set((char*) msg.name, (char*) msg.value); |
335 | 339 | } else { |
336 | 340 | ERROR("sys_prop: permission denied uid:%d name:%s\n", |
@@ -18,8 +18,14 @@ | ||
18 | 18 | #define _INIT_PROPERTY_H |
19 | 19 | |
20 | 20 | #include <stddef.h> |
21 | +#include <sys/socket.h> | |
21 | 22 | #include <sys/system_properties.h> |
22 | 23 | |
24 | +struct property_audit_data { | |
25 | + ucred *cr; | |
26 | + const char* name; | |
27 | +}; | |
28 | + | |
23 | 29 | extern void property_init(void); |
24 | 30 | extern void property_load_boot_defaults(void); |
25 | 31 | extern int load_properties_from_file(const char *fn, const char *); |
@@ -14,6 +14,9 @@ LOCAL_SHARED_LIBRARIES := liblog | ||
14 | 14 | LOCAL_CLANG := true |
15 | 15 | LOCAL_CPP_EXTENSION := .cc |
16 | 16 | LOCAL_CFLAGS += -Werror -Wall |
17 | +ifeq ($(COMPATIBILITY_ENHANCEMENT_PACKAGE), true) | |
18 | + LOCAL_CFLAGS += -D_COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
19 | +endif | |
17 | 20 | LOCAL_CPPFLAGS := -std=gnu++11 -fvisibility=protected |
18 | 21 | LOCAL_LDFLAGS := -ldl |
19 | 22 | LOCAL_MULTILIB := both |
@@ -98,6 +98,10 @@ static constexpr const char* kCodeCacheDir = "code_cache"; | ||
98 | 98 | |
99 | 99 | static constexpr uint32_t kLibNativeBridgeVersion = 2; |
100 | 100 | |
101 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
102 | +static bool null_instruction_set = false; | |
103 | +#endif | |
104 | + | |
101 | 105 | // Characters allowed in a native bridge filename. The first character must |
102 | 106 | // be in [a-zA-Z] (expected 'l' for "libx"). The rest must be in [a-zA-Z0-9._-]. |
103 | 107 | static bool CharacterAllowed(char c, bool first) { |
@@ -109,6 +113,13 @@ static bool CharacterAllowed(char c, bool first) { | ||
109 | 113 | } |
110 | 114 | } |
111 | 115 | |
116 | +static void ReleaseAppCodeCacheDir() { | |
117 | + if (app_code_cache_dir != nullptr) { | |
118 | + delete[] app_code_cache_dir; | |
119 | + app_code_cache_dir = nullptr; | |
120 | + } | |
121 | +} | |
122 | + | |
112 | 123 | // We only allow simple names for the library. It is supposed to be a file in |
113 | 124 | // /system/lib or /vendor/lib. Only allow a small range of characters, that is |
114 | 125 | // names consisting of [a-zA-Z0-9._-] and starting with [a-zA-Z]. |
@@ -162,8 +173,7 @@ static bool VersionCheck(const NativeBridgeCallbacks* cb) { | ||
162 | 173 | static void CloseNativeBridge(bool with_error) { |
163 | 174 | state = NativeBridgeState::kClosed; |
164 | 175 | had_error |= with_error; |
165 | - delete[] app_code_cache_dir; | |
166 | - app_code_cache_dir = nullptr; | |
176 | + ReleaseAppCodeCacheDir(); | |
167 | 177 | } |
168 | 178 | |
169 | 179 | bool LoadNativeBridge(const char* nb_library_filename, |
@@ -238,8 +248,14 @@ static const char* kRuntimeISA = "unknown"; | ||
238 | 248 | |
239 | 249 | bool NeedsNativeBridge(const char* instruction_set) { |
240 | 250 | if (instruction_set == nullptr) { |
251 | + | |
252 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
253 | + null_instruction_set = true; | |
254 | + return true; | |
255 | +#else | |
241 | 256 | ALOGE("Null instruction set in NeedsNativeBridge."); |
242 | 257 | return false; |
258 | +#endif | |
243 | 259 | } |
244 | 260 | return strncmp(instruction_set, kRuntimeISA, strlen(kRuntimeISA) + 1) != 0; |
245 | 261 | } |
@@ -255,6 +271,15 @@ bool PreInitializeNativeBridge(const char* app_data_dir_in, const char* instruct | ||
255 | 271 | return false; |
256 | 272 | } |
257 | 273 | |
274 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
275 | + if (app_data_dir_in != nullptr) { | |
276 | + // Create the path to the application code cache directory. | |
277 | + // The memory will be released after Initialization or when the native bridge is closed. | 
278 | + const size_t len = strlen(app_data_dir_in) + strlen(kCodeCacheDir) + 2; // '\0' + '/' | |
279 | + app_code_cache_dir = new char[len]; | |
280 | + snprintf(app_code_cache_dir, len, "%s/%s", app_data_dir_in, kCodeCacheDir); | |
281 | + } | |
282 | +#else | |
258 | 283 | if (app_data_dir_in == nullptr) { |
259 | 284 | ALOGE("Application private directory cannot be null."); |
260 | 285 | CloseNativeBridge(true); |
@@ -266,13 +291,18 @@ bool PreInitializeNativeBridge(const char* app_data_dir_in, const char* instruct | ||
266 | 291 | const size_t len = strlen(app_data_dir_in) + strlen(kCodeCacheDir) + 2; // '\0' + '/' |
267 | 292 | app_code_cache_dir = new char[len]; |
268 | 293 | snprintf(app_code_cache_dir, len, "%s/%s", app_data_dir_in, kCodeCacheDir); |
294 | +#endif | |
269 | 295 | |
270 | 296 | // Bind-mount /system/lib{,64}/<isa>/cpuinfo to /proc/cpuinfo. |
271 | 297 | // Failure is not fatal and will keep the native bridge in kPreInitialized. |
272 | 298 | state = NativeBridgeState::kPreInitialized; |
273 | 299 | |
274 | 300 | #ifndef __APPLE__ |
301 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
302 | + if (null_instruction_set || instruction_set == nullptr || app_data_dir_in == nullptr) { | |
303 | +#else | |
275 | 304 | if (instruction_set == nullptr) { |
305 | +#endif | |
276 | 306 | return true; |
277 | 307 | } |
278 | 308 | size_t isa_len = strlen(instruction_set); |
@@ -401,31 +431,42 @@ bool InitializeNativeBridge(JNIEnv* env, const char* instruction_set) { | ||
401 | 431 | // point we are not multi-threaded, so we do not need locking here. |
402 | 432 | |
403 | 433 | if (state == NativeBridgeState::kPreInitialized) { |
434 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
435 | + if (app_code_cache_dir != nullptr) { | |
436 | +#endif | |
404 | 437 | // Check for code cache: if it doesn't exist try to create it. |
405 | 438 | struct stat st; |
406 | 439 | if (stat(app_code_cache_dir, &st) == -1) { |
407 | 440 | if (errno == ENOENT) { |
408 | 441 | if (mkdir(app_code_cache_dir, S_IRWXU | S_IRWXG | S_IXOTH) == -1) { |
409 | - ALOGE("Cannot create code cache directory %s: %s.", app_code_cache_dir, strerror(errno)); | |
410 | - CloseNativeBridge(true); | |
442 | + ALOGW("Cannot create code cache directory %s: %s.", app_code_cache_dir, strerror(errno)); | |
443 | + ReleaseAppCodeCacheDir(); | |
411 | 444 | } |
412 | 445 | } else { |
413 | - ALOGE("Cannot stat code cache directory %s: %s.", app_code_cache_dir, strerror(errno)); | |
414 | - CloseNativeBridge(true); | |
446 | + ALOGW("Cannot stat code cache directory %s: %s.", app_code_cache_dir, strerror(errno)); | |
447 | + ReleaseAppCodeCacheDir(); | |
415 | 448 | } |
416 | 449 | } else if (!S_ISDIR(st.st_mode)) { |
417 | - ALOGE("Code cache is not a directory %s.", app_code_cache_dir); | |
418 | - CloseNativeBridge(true); | |
450 | + ALOGW("Code cache is not a directory %s.", app_code_cache_dir); | |
451 | + ReleaseAppCodeCacheDir(); | |
419 | 452 | } |
453 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
454 | + } | |
455 | +#endif | |
420 | 456 | |
421 | 457 | // If we're still PreInitialized (didn't fail the code cache checks) try to initialize. 
422 | 458 | if (state == NativeBridgeState::kPreInitialized) { |
423 | 459 | if (callbacks->initialize(runtime_callbacks, app_code_cache_dir, instruction_set)) { |
460 | +#ifdef _COMPATIBILITY_ENHANCEMENT_PACKAGE_ | |
461 | + if (!null_instruction_set) { | |
462 | + SetupEnvironment(callbacks, env, instruction_set); | |
463 | + } | |
464 | +#else | |
424 | 465 | SetupEnvironment(callbacks, env, instruction_set); |
466 | +#endif | |
425 | 467 | state = NativeBridgeState::kInitialized; |
426 | 468 | // We no longer need the code cache path, release the memory. |
427 | - delete[] app_code_cache_dir; | |
428 | - app_code_cache_dir = nullptr; | |
469 | + ReleaseAppCodeCacheDir(); | |
429 | 470 | } else { |
430 | 471 | // Unload the library. |
431 | 472 | dlclose(native_bridge_handle); |
@@ -9,6 +9,7 @@ include $(CLEAR_VARS) | ||
9 | 9 | test_src_files := \ |
10 | 10 | CodeCacheCreate_test.cpp \ |
11 | 11 | CodeCacheExists_test.cpp \ |
12 | + CodeCacheStatFail_test.cpp \ | |
12 | 13 | CompleteFlow_test.cpp \ |
13 | 14 | InvalidCharsNativeBridge_test.cpp \ |
14 | 15 | NativeBridge2Signal_test.cpp \ |
@@ -0,0 +1,51 @@ | ||
1 | +/* | |
2 | + * Copyright (C) 2014 The Android Open Source Project | |
3 | + * | |
4 | + * Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | + * you may not use this file except in compliance with the License. | |
6 | + * You may obtain a copy of the License at | |
7 | + * | |
8 | + * http://www.apache.org/licenses/LICENSE-2.0 | |
9 | + * | |
10 | + * Unless required by applicable law or agreed to in writing, software | |
11 | + * distributed under the License is distributed on an "AS IS" BASIS, | |
12 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | + * See the License for the specific language governing permissions and | |
14 | + * limitations under the License. | |
15 | + */ | |
16 | + | |
17 | +#include "NativeBridgeTest.h" | |
18 | + | |
19 | +#include <errno.h> | |
20 | +#include <sys/stat.h> | |
21 | +#include <unistd.h> | |
22 | +#include <fcntl.h> | |
23 | + | |
24 | +namespace android { | |
25 | + | |
26 | +// Tests that the bridge is initialized without errors if the code_cache | 
27 | +// exists as a file. | 
28 | +TEST_F(NativeBridgeTest, CodeCacheStatFail) { | |
29 | + int fd = creat(kCodeCache, O_RDWR); | |
30 | + ASSERT_NE(-1, fd); | |
31 | + close(fd); | |
32 | + | |
33 | + struct stat st; | |
34 | + ASSERT_EQ(-1, stat(kCodeCacheStatFail, &st)); | |
35 | + ASSERT_EQ(ENOTDIR, errno); | |
36 | + | |
37 | + // Init | |
38 | + ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr)); | |
39 | + ASSERT_TRUE(PreInitializeNativeBridge(kCodeCacheStatFail, "isa")); | |
40 | + ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr)); | |
41 | + ASSERT_TRUE(NativeBridgeAvailable()); | |
42 | + ASSERT_FALSE(NativeBridgeError()); | |
43 | + | |
44 | + // Clean up | |
45 | + UnloadNativeBridge(); | |
46 | + | |
47 | + ASSERT_FALSE(NativeBridgeError()); | |
48 | + unlink(kCodeCache); | |
49 | +} | |
50 | + | |
51 | +} // namespace android |
@@ -24,6 +24,7 @@ | ||
24 | 24 | |
25 | 25 | constexpr const char* kNativeBridgeLibrary = "libnativebridge-dummy.so"; |
26 | 26 | constexpr const char* kCodeCache = "./code_cache"; |
27 | +constexpr const char* kCodeCacheStatFail = "./code_cache/temp"; | |
27 | 28 | |
28 | 29 | namespace android { |
29 | 30 |
@@ -24,6 +24,9 @@ LOCAL_STATIC_LIBRARIES := libz | ||
24 | 24 | LOCAL_SHARED_LIBRARIES := libutils libbase |
25 | 25 | LOCAL_MODULE:= libziparchive |
26 | 26 | LOCAL_CFLAGS := -Werror -Wall |
27 | +ifeq ($(ZIP_OPTIMIZATION_NO_INTEGRITY),true) | |
28 | + LOCAL_CFLAGS += -DZIP_NO_INTEGRITY | |
29 | +endif | |
27 | 30 | LOCAL_CPPFLAGS := -Wold-style-cast |
28 | 31 | include $(BUILD_STATIC_LIBRARY) |
29 | 32 |
@@ -34,7 +37,10 @@ LOCAL_STATIC_LIBRARIES := libz libutils libbase | ||
34 | 37 | LOCAL_MODULE:= libziparchive-host |
35 | 38 | LOCAL_CFLAGS := -Werror |
36 | 39 | ifneq ($(strip $(USE_MINGW)),) |
37 | - LOCAL_CFLAGS += -mno-ms-bitfields | |
40 | + LOCAL_CFLAGS += -mno-ms-bitfields | |
41 | +endif | |
42 | +ifeq ($(ZIP_OPTIMIZATION_NO_INTEGRITY),true) | |
43 | + LOCAL_CFLAGS += -DZIP_NO_INTEGRITY | |
38 | 44 | endif |
39 | 45 | LOCAL_MULTILIB := both |
40 | 46 | include $(BUILD_HOST_STATIC_LIBRARY) |
@@ -46,6 +52,9 @@ LOCAL_STATIC_LIBRARIES := libz libutils | ||
46 | 52 | LOCAL_SHARED_LIBRARIES := liblog libbase |
47 | 53 | LOCAL_MODULE:= libziparchive-host |
48 | 54 | LOCAL_CFLAGS := -Werror |
55 | +ifeq ($(ZIP_OPTIMIZATION_NO_INTEGRITY),true) | |
56 | + LOCAL_CFLAGS += -DZIP_NO_INTEGRITY | |
57 | +endif | |
49 | 58 | LOCAL_MULTILIB := both |
50 | 59 | include $(BUILD_HOST_SHARED_LIBRARY) |
51 | 60 |
@@ -54,6 +63,9 @@ include $(CLEAR_VARS) | ||
54 | 63 | LOCAL_MODULE := ziparchive-tests |
55 | 64 | LOCAL_CPP_EXTENSION := .cc |
56 | 65 | LOCAL_CFLAGS := -Werror |
66 | +ifeq ($(ZIP_OPTIMIZATION_NO_INTEGRITY),true) | |
67 | + LOCAL_CFLAGS += -DZIP_NO_INTEGRITY | |
68 | +endif | |
57 | 69 | LOCAL_SRC_FILES := zip_archive_test.cc entry_name_utils_test.cc |
58 | 70 | LOCAL_SHARED_LIBRARIES := liblog libbase |
59 | 71 | LOCAL_STATIC_LIBRARIES := libziparchive libz libutils |
@@ -65,6 +77,9 @@ LOCAL_CPP_EXTENSION := .cc | ||
65 | 77 | LOCAL_CFLAGS += \ |
66 | 78 | -Werror \ |
67 | 79 | -Wno-unnamed-type-template-args |
80 | +ifeq ($(ZIP_OPTIMIZATION_NO_INTEGRITY),true) | |
81 | + LOCAL_CFLAGS += -DZIP_NO_INTEGRITY | |
82 | +endif | |
68 | 83 | LOCAL_SRC_FILES := zip_archive_test.cc entry_name_utils_test.cc |
69 | 84 | LOCAL_SHARED_LIBRARIES := libziparchive-host liblog libbase |
70 | 85 | LOCAL_STATIC_LIBRARIES := \ |
@@ -712,6 +712,57 @@ static inline ssize_t ReadAtOffset(int fd, uint8_t* buf, size_t len, | ||
712 | 712 | #endif |
713 | 713 | } |
714 | 714 | |
715 | +#ifdef ZIP_NO_INTEGRITY | |
716 | +static int32_t FindEntryNoIntegrity(const ZipArchive* archive, const int ent, | |
717 | + ZipEntry* data) { | |
718 | + const uint16_t nameLen = archive->hash_table[ent].name_length; | |
719 | + | |
720 | + // Recover the start of the central directory entry from the filename | |
721 | + // pointer. The filename is the first entry past the fixed-size data, | |
722 | + // so we can just subtract back from that. | |
723 | + const uint8_t* ptr = archive->hash_table[ent].name; | |
724 | + ptr -= sizeof(CentralDirectoryRecord); | |
725 | + | |
726 | + // This is the base of our mmapped region, we have to sanity check that | |
727 | + // the name that's in the hash table is a pointer to a location within | |
728 | + // this mapped region. | |
729 | + const uint8_t* base_ptr = reinterpret_cast<const uint8_t*>( | |
730 | + archive->directory_map.getDataPtr()); | |
731 | + if (ptr < base_ptr || ptr > base_ptr + archive->directory_map.getDataLength()) { | |
732 | + ALOGW("Zip: Invalid entry pointer"); | |
733 | + return kInvalidOffset; | |
734 | + } | |
735 | + | |
736 | + const CentralDirectoryRecord *cdr = | |
737 | + reinterpret_cast<const CentralDirectoryRecord*>(ptr); | |
738 | + | |
739 | + // The offset of the start of the central directory in the zipfile. | |
740 | + // We keep this lying around so that we can sanity check all our lengths | |
741 | + // and our per-file structures. | |
742 | + const off64_t cd_offset = archive->directory_offset; | |
743 | + | |
744 | + // Fill out the compression method, modification time, crc32 | |
745 | + // and other interesting attributes from the central directory. These | |
746 | + // will later be compared against values from the local file header. | |
747 | + data->method = cdr->compression_method; | |
748 | + data->mod_time = cdr->last_mod_time; | |
749 | + data->crc32 = cdr->crc32; | |
750 | + data->compressed_length = cdr->compressed_size; | |
751 | + data->uncompressed_length = cdr->uncompressed_size; | |
752 | + | |
753 | + // Figure out the local header offset from the central directory. The | |
754 | + // actual file data will begin after the local header and the name / | |
755 | + // extra comments. | |
756 | + const off64_t local_header_offset = cdr->local_file_header_offset; | |
757 | + if (local_header_offset + static_cast<off64_t>(sizeof(LocalFileHeader)) >= cd_offset) { | |
758 | + ALOGW("Zip: bad local hdr offset in zip"); | |
759 | + return kInvalidOffset; | |
760 | + } | |
761 | + | |
762 | + return 0; | |
763 | +} | |
764 | +#endif | |
765 | + | |
715 | 766 | static int32_t FindEntry(const ZipArchive* archive, const int ent, |
716 | 767 | ZipEntry* data) { |
717 | 768 | const uint16_t nameLen = archive->hash_table[ent].name_length; |
@@ -881,6 +932,49 @@ struct IterationHandle { | ||
881 | 932 | } |
882 | 933 | }; |
883 | 934 | |
935 | +#ifdef ZIP_NO_INTEGRITY | |
936 | +int32_t NextNoIntegrity(void* cookie, ZipEntry* data, ZipEntryName* name) { | |
937 | + IterationHandle* handle = reinterpret_cast<IterationHandle*>(cookie); | |
938 | + if (handle == NULL) { | |
939 | + return kInvalidHandle; | |
940 | + } | |
941 | + | |
942 | + ZipArchive* archive = handle->archive; | |
943 | + if (archive == NULL || archive->hash_table == NULL) { | |
944 | + ALOGW("Zip: Invalid ZipArchiveHandle"); | |
945 | + return kInvalidHandle; | |
946 | + } | |
947 | + | |
948 | + const uint32_t currentOffset = handle->position; | |
949 | + const uint32_t hash_table_length = archive->hash_table_size; | |
950 | + const ZipEntryName *hash_table = archive->hash_table; | |
951 | + | |
952 | + for (uint32_t i = currentOffset; i < hash_table_length; ++i) { | |
953 | + if (hash_table[i].name != NULL && | |
954 | + (handle->prefix_len == 0 || | |
955 | + (hash_table[i].name_length >= handle->prefix_len && | |
956 | + memcmp(handle->prefix, hash_table[i].name, handle->prefix_len) == 0)) && | |
957 | + (handle->suffix_len == 0 || | |
958 | + (hash_table[i].name_length >= handle->suffix_len && | |
959 | + memcmp(handle->suffix, | |
960 | + hash_table[i].name + hash_table[i].name_length - handle->suffix_len, | |
961 | + handle->suffix_len) == 0))) { | |
962 | + handle->position = (i + 1); | |
963 | + const int error = FindEntryNoIntegrity(archive, i, data); | |
964 | + if (!error) { | |
965 | + name->name = hash_table[i].name; | |
966 | + name->name_length = hash_table[i].name_length; | |
967 | + } | |
968 | + | |
969 | + return error; | |
970 | + } | |
971 | + } | |
972 | + | |
973 | + handle->position = 0; | |
974 | + return kIterationEnd; | |
975 | +} | |
976 | +#endif | |
977 | + | |
884 | 978 | int32_t StartIteration(ZipArchiveHandle handle, void** cookie_ptr, |
885 | 979 | const ZipEntryName* optional_prefix, |
886 | 980 | const ZipEntryName* optional_suffix) { |