// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "client/windows/crash_generation/crash_generation_server.h"
#include <windows.h>
#include <cassert>
#include <list>
#include "client/windows/common/auto_critical_section.h"
#include "common/scoped_ptr.h"

#include "client/windows/crash_generation/client_info.h"

namespace google_breakpad {

// Output buffer size.
static const size_t kOutBufferSize = 64;

// Input buffer size.
static const size_t kInBufferSize = 64;

// Access flags for the client on the dump request event.
static const DWORD kDumpRequestEventAccess = EVENT_MODIFY_STATE;

// Access flags for the client on the dump generated event.
static const DWORD kDumpGeneratedEventAccess = EVENT_MODIFY_STATE |
                                               SYNCHRONIZE;

// Access flags for the client on the mutex.
static const DWORD kMutexAccess = SYNCHRONIZE;

// Attribute flags for the pipe.
static const DWORD kPipeAttr = FILE_FLAG_FIRST_PIPE_INSTANCE |
                               PIPE_ACCESS_DUPLEX |
                               FILE_FLAG_OVERLAPPED;

// Mode for the pipe.
static const DWORD kPipeMode = PIPE_TYPE_MESSAGE |
                               PIPE_READMODE_MESSAGE |
                               PIPE_WAIT;
// For pipe I/O, execute the callback in the wait thread itself,
// since the callback does very little work. The callback executes
// the code for one of the states of the server state machine, and
// the code for all of the states performs async I/O and hence
// finishes very quickly.
static const ULONG kPipeIOThreadFlags = WT_EXECUTEINWAITTHREAD;

// Dump request threads will, most likely, generate dumps. That may
// take some time to finish, so specify the WT_EXECUTELONGFUNCTION flag.
static const ULONG kDumpRequestThreadFlags = WT_EXECUTEINWAITTHREAD |
                                             WT_EXECUTELONGFUNCTION;

static bool IsClientRequestValid(const ProtocolMessage& msg) {
  return msg.tag == MESSAGE_TAG_UPLOAD_REQUEST ||
         (msg.tag == MESSAGE_TAG_REGISTRATION_REQUEST &&
          msg.id != 0 &&
          msg.thread_id != NULL &&
          msg.exception_pointers != NULL &&
          msg.assert_info != NULL);
}

#ifdef _DEBUG
static bool CheckForIOIncomplete(bool success) {
  // We should never get an I/O incomplete since we should not execute this
  // unless the operation has finished and the overlapped event is signaled. If
  // we do get INCOMPLETE, we have a bug in our code.
  return success ? false : (GetLastError() == ERROR_IO_INCOMPLETE);
}
#endif

CrashGenerationServer::CrashGenerationServer(
    const std::wstring& pipe_name,
    SECURITY_ATTRIBUTES* pipe_sec_attrs,
    OnClientConnectedCallback connect_callback,
    void* connect_context,
    OnClientDumpRequestCallback dump_callback,
    void* dump_context,
    OnClientExitedCallback exit_callback,
    void* exit_context,
    OnClientUploadRequestCallback upload_request_callback,
    void* upload_context,
    bool generate_dumps,
    const std::wstring* dump_path)
    : pipe_name_(pipe_name),
      pipe_sec_attrs_(pipe_sec_attrs),
      pipe_(NULL),
      pipe_wait_handle_(NULL),
      server_alive_handle_(NULL),
      connect_callback_(connect_callback),
      connect_context_(connect_context),
      dump_callback_(dump_callback),
      dump_context_(dump_context),
      exit_callback_(exit_callback),
      exit_context_(exit_context),
      upload_request_callback_(upload_request_callback),
      upload_context_(upload_context),
      generate_dumps_(generate_dumps),
      dump_path_(dump_path ? *dump_path : L""),
      server_state_(IPC_SERVER_STATE_UNINITIALIZED),
      shutting_down_(false),
      overlapped_(),
      client_info_(NULL),
      pre_fetch_custom_info_(true) {
  InitializeCriticalSection(&sync_);
}

// This should never be called from the OnPipeConnected callback.
// Otherwise the UnregisterWaitEx call below will cause a deadlock.
CrashGenerationServer::~CrashGenerationServer() {
  // New scope to release the lock automatically.
  {
    // Make sure no clients are added or removed beyond this point.
    // Before adding or removing any clients, the critical section
    // must be entered and the shutting_down_ flag checked. The
    // critical section is then exited only after the clients_ list
    // modifications are done and the list is in a consistent state.
    AutoCriticalSection lock(&sync_);

    // Indicate to existing threads that server is shutting down.
    shutting_down_ = true;
  }
  // No one will modify the clients_ list beyond this point -
  // not even from another thread.

  // Even if there are no current worker threads running, it is possible that
  // an I/O request is pending on the pipe right now but not yet done.
  // In fact, it's very likely this is the case unless we are in an ERROR
  // state. If we don't wait for the pending I/O to be done, then when the I/O
  // completes, it may write to invalid memory. AppVerifier will flag this
  // problem too. So we disconnect from the pipe and then wait for the server
  // to get into error state so that the pending I/O will fail and get
  // cleared.
  DisconnectNamedPipe(pipe_);
  int num_tries = 100;
  while (num_tries-- && server_state_ != IPC_SERVER_STATE_ERROR) {
    Sleep(10);
  }

  // Unregister wait on the pipe.
  if (pipe_wait_handle_) {
    // Wait for already executing callbacks to finish.
    UnregisterWaitEx(pipe_wait_handle_, INVALID_HANDLE_VALUE);
  }

  // Close the pipe to avoid further client connections.
  if (pipe_) {
    CloseHandle(pipe_);
  }

  // Request all ClientInfo objects to unregister all waits.
  // No need to enter the critical section because no one is allowed to modify
  // the clients_ list once the shutting_down_ flag is set.
  std::list<ClientInfo*>::iterator iter;
  for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
    ClientInfo* client_info = *iter;
    // Unregister waits. Wait for already executing callbacks to finish.
    // Unregister the client process exit wait first and only then unregister
    // the dump request wait. The reason is that the OnClientExit callback
    // also unregisters the dump request wait and such a race (doing the same
    // unregistration from two threads) is undesirable.
    client_info->UnregisterProcessExitWait(true);
    client_info->UnregisterDumpRequestWaitAndBlockUntilNoPending();

    // Destroying the ClientInfo here is safe because all wait operations for
    // this ClientInfo were unregistered and no pending or running callbacks
    // for this ClientInfo can possibly exist (the block_until_no_pending
    // option was used).
    delete client_info;
  }

  if (server_alive_handle_) {
    // Release the mutex before closing the handle so that clients requesting
    // dumps don't wait for a long time for the server to generate a dump.
    ReleaseMutex(server_alive_handle_);
    CloseHandle(server_alive_handle_);
  }

  if (overlapped_.hEvent) {
    CloseHandle(overlapped_.hEvent);
  }

  DeleteCriticalSection(&sync_);
}

bool CrashGenerationServer::Start() {
  if (server_state_ != IPC_SERVER_STATE_UNINITIALIZED) {
    return false;
  }

  server_state_ = IPC_SERVER_STATE_INITIAL;

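  // The server holds this mutex for its entire lifetime. Clients are granted
  // SYNCHRONIZE access to it (see kMutexAccess), so a client waiting for a
  // dump can detect that the server has gone away instead of blocking
  // forever.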
  server_alive_handle_ = CreateMutex(NULL, TRUE, NULL);
  if (!server_alive_handle_) {
    return false;
  }

  // Event to signal the client connection and pipe reads and writes.
  overlapped_.hEvent = CreateEvent(NULL,   // Security descriptor.
                                   TRUE,   // Manual reset.
                                   FALSE,  // Initially nonsignaled.
                                   NULL);  // Name.
  if (!overlapped_.hEvent) {
    return false;
  }

  // Register a callback with the thread pool for the client connection.
  if (!RegisterWaitForSingleObject(&pipe_wait_handle_,
                                   overlapped_.hEvent,
                                   OnPipeConnected,
                                   this,
                                   INFINITE,
                                   kPipeIOThreadFlags)) {
    return false;
  }

  pipe_ = CreateNamedPipe(pipe_name_.c_str(),
                          kPipeAttr,
                          kPipeMode,
                          1,                // Maximum instances: serve one
                                            // client connection at a time.
                          kOutBufferSize,
                          kInBufferSize,
                          0,                // Default time-out.
                          pipe_sec_attrs_);
  if (pipe_ == INVALID_HANDLE_VALUE) {
    return false;
  }

  // Kick-start the state machine. This will initiate an asynchronous wait
  // for client connections.
  if (!SetEvent(overlapped_.hEvent)) {
    server_state_ = IPC_SERVER_STATE_ERROR;
    return false;
  }

  // If we are in error state, it's because we failed to start listening.
  return true;
}

// If the server thread serving clients ever gets into the
// ERROR state, reset the event, close the pipe and remain
// in the error state forever. Error state means something
// that we didn't account for has happened, and it's dangerous
// to do anything unknowingly.
void CrashGenerationServer::HandleErrorState() {
  assert(server_state_ == IPC_SERVER_STATE_ERROR);

  // If the server is shutting down anyway, don't clean up
  // here since the shutdown process will clean up.
  if (shutting_down_) {
    return;
  }

  if (pipe_wait_handle_) {
    UnregisterWait(pipe_wait_handle_);
    pipe_wait_handle_ = NULL;
  }

  if (pipe_) {
    CloseHandle(pipe_);
    pipe_ = NULL;
  }

  if (overlapped_.hEvent) {
    CloseHandle(overlapped_.hEvent);
    overlapped_.hEvent = NULL;
  }
}

// When the server thread serving clients is in the INITIAL state,
// try to connect to the pipe asynchronously. If the connection
// finishes synchronously, directly go into the CONNECTED state;
// otherwise go into the CONNECTING state. For any problems, go
// into the ERROR state.
void CrashGenerationServer::HandleInitialState() {
  assert(server_state_ == IPC_SERVER_STATE_INITIAL);

  if (!ResetEvent(overlapped_.hEvent)) {
    EnterErrorState();
    return;
  }

  bool success = ConnectNamedPipe(pipe_, &overlapped_) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  // MSDN does not make it clear whether ConnectNamedPipe, when used in
  // overlapped mode, can ever return a non-zero value, and if so, in
  // which cases.
  assert(!success);

  switch (error_code) {
    case ERROR_IO_PENDING:
      EnterStateWhenSignaled(IPC_SERVER_STATE_CONNECTING);
      break;

    case ERROR_PIPE_CONNECTED:
      EnterStateImmediately(IPC_SERVER_STATE_CONNECTED);
      break;

    default:
      EnterErrorState();
      break;
  }
}

// When the server thread serving the clients is in the CONNECTING state,
// try to get the result of the asynchronous connection request using
// the OVERLAPPED object. If the result indicates the connection is done,
// go into the CONNECTED state. If the result indicates I/O is still
// INCOMPLETE, remain in the CONNECTING state. For any problems,
// go into the DISCONNECTING state.
void CrashGenerationServer::HandleConnectingState() {
  assert(server_state_ == IPC_SERVER_STATE_CONNECTING);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  if (success) {
    EnterStateImmediately(IPC_SERVER_STATE_CONNECTED);
  } else if (error_code != ERROR_IO_INCOMPLETE) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
  } else {
    // remain in CONNECTING state
  }
}

// When the server thread serving the clients is in the CONNECTED state,
// try to issue an asynchronous read from the pipe. If read completes
// synchronously or if I/O is pending then go into the READING state.
// For any problems, go into the DISCONNECTING state.
void CrashGenerationServer::HandleConnectedState() {
  assert(server_state_ == IPC_SERVER_STATE_CONNECTED);

  DWORD bytes_count = 0;
  memset(&msg_, 0, sizeof(msg_));
  bool success = ReadFile(pipe_,
                          &msg_,
                          sizeof(msg_),
                          &bytes_count,
                          &overlapped_) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  // Note that the asynchronous read issued above can finish before the
  // code below executes. But, it is okay to change state after issuing
  // the asynchronous read. This is because even if the asynchronous read
  // is done, the callback for it would not be executed until the current
  // thread finishes its execution.
  if (success || error_code == ERROR_IO_PENDING) {
    EnterStateWhenSignaled(IPC_SERVER_STATE_READING);
  } else {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
  }
}

// When the server thread serving the clients is in the READING state,
// try to get the result of the async read. If async read is done,
// go into the READ_DONE state. For any problems, go into the
// DISCONNECTING state.
void CrashGenerationServer::HandleReadingState() {
  assert(server_state_ == IPC_SERVER_STATE_READING);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  if (success && bytes_count == sizeof(ProtocolMessage)) {
    EnterStateImmediately(IPC_SERVER_STATE_READ_DONE);
    return;
  }

  assert(!CheckForIOIncomplete(success));
  EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
}

// When the server thread serving the client is in the READ_DONE state,
// validate the client's request message, register the client by
// creating appropriate objects and prepare the response. Then try to
// write the response to the pipe asynchronously. If that succeeds,
// go into the WRITING state. For any problems, go into the DISCONNECTING
// state.
void CrashGenerationServer::HandleReadDoneState() {
  assert(server_state_ == IPC_SERVER_STATE_READ_DONE);

  if (!IsClientRequestValid(msg_)) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  if (msg_.tag == MESSAGE_TAG_UPLOAD_REQUEST) {
    if (upload_request_callback_)
      upload_request_callback_(upload_context_, msg_.id);
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  scoped_ptr<ClientInfo> client_info(
      new ClientInfo(this,
                     msg_.id,
                     msg_.dump_type,
                     msg_.thread_id,
                     msg_.exception_pointers,
                     msg_.assert_info,
                     msg_.custom_client_info));

  if (!client_info->Initialize()) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  // Issues an asynchronous WriteFile call if successful.
  // Iff successful, assigns ownership of the client_info pointer to the server
  // instance, in which case we must be sure not to free it in this function.
  if (!RespondToClient(client_info.get())) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  // This pointer is valid only as long as the ClientInfo can be found in
  // the clients_ list.
  client_info_ = client_info.release();

  // Note that the asynchronous write issued by the RespondToClient function
  // can finish before the code below executes. But it is okay to change
  // state after issuing the asynchronous write. This is because even if
  // the asynchronous write is done, the callback for it would not be
  // executed until the current thread finishes its execution.
  EnterStateWhenSignaled(IPC_SERVER_STATE_WRITING);
}

// When the server thread serving the clients is in the WRITING state,
// try to get the result of the async write. If the async write is done,
// go into the WRITE_DONE state. For any problems, go into the
// DISCONNECTING state.
void CrashGenerationServer::HandleWritingState() {
  assert(server_state_ == IPC_SERVER_STATE_WRITING);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  if (success) {
    EnterStateImmediately(IPC_SERVER_STATE_WRITE_DONE);
    return;
  }

  assert(!CheckForIOIncomplete(success));
  EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
}

// When the server thread serving the clients is in the WRITE_DONE state,
// try to issue an async read on the pipe. If the read completes synchronously
// or if I/O is still pending then go into the READING_ACK state. For any
// issues, go into the DISCONNECTING state.
void CrashGenerationServer::HandleWriteDoneState() {
  assert(server_state_ == IPC_SERVER_STATE_WRITE_DONE);

  DWORD bytes_count = 0;
  bool success = ReadFile(pipe_,
                          &msg_,
                          sizeof(msg_),
                          &bytes_count,
                          &overlapped_) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  if (success) {
    EnterStateImmediately(IPC_SERVER_STATE_READING_ACK);
  } else if (error_code == ERROR_IO_PENDING) {
    EnterStateWhenSignaled(IPC_SERVER_STATE_READING_ACK);
  } else {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
  }
}

// When the server thread serving the clients is in the READING_ACK state,
// try to get result of async read. Go into the DISCONNECTING state.
void CrashGenerationServer::HandleReadingAckState() {
  assert(server_state_ == IPC_SERVER_STATE_READING_ACK);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  if (success) {
    // The connection handshake with the client is now complete; perform
    // the callback.
    if (connect_callback_) {
      // Note that there is only a single copy of the ClientInfo of the
      // currently connected client. However it is being referenced from
      // two different places:
      // - the client_info_ member
      // - the clients_ list
      // The lifetime of this ClientInfo depends on the lifetime of the
      // client process - basically it can go away at any time.
      // However, as long as it is referenced by the clients_ list it
      // is guaranteed to be valid. Enter the critical section and check
      // to see whether the client_info_ can be found in the list.
      // If found, execute the callback and only then leave the critical
      // section.
      AutoCriticalSection lock(&sync_);

      bool client_is_still_alive = false;
      std::list<ClientInfo*>::iterator iter;
      for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
        if (client_info_ == *iter) {
          client_is_still_alive = true;
          break;
        }
      }

      if (client_is_still_alive) {
        connect_callback_(connect_context_, client_info_);
      }
    }
  } else {
    assert(!CheckForIOIncomplete(success));
  }

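  // Whether or not the ack was read successfully, the handshake with this
  // client is over. Disconnect so the single pipe instance can be reused for
  // the next client connection.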
  EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
}

// When the server thread serving the client is in the DISCONNECTING state,
// disconnect from the pipe and reset the event. If anything fails, go into
// the ERROR state. If it goes well, go into the INITIAL state and set the
// event to start all over again.
void CrashGenerationServer::HandleDisconnectingState() {
  assert(server_state_ == IPC_SERVER_STATE_DISCONNECTING);

  // Done serving the client.
  client_info_ = NULL;

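  // Scrub the OVERLAPPED structure so it can be reused for the next
  // connection. The event handle itself is kept; it is reset below.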
  overlapped_.Internal = NULL;
  overlapped_.InternalHigh = NULL;
  overlapped_.Offset = 0;
  overlapped_.OffsetHigh = 0;
  overlapped_.Pointer = NULL;

  if (!ResetEvent(overlapped_.hEvent)) {
    EnterErrorState();
    return;
  }

  if (!DisconnectNamedPipe(pipe_)) {
    EnterErrorState();
    return;
  }

  // If the server is shutting down do not connect to the
  // next client.
  if (shutting_down_) {
    return;
  }

  EnterStateImmediately(IPC_SERVER_STATE_INITIAL);
}

void CrashGenerationServer::EnterErrorState() {
  // Signal the event so that the registered wait callback runs and
  // HandleErrorState can perform the cleanup.
  SetEvent(overlapped_.hEvent);
  server_state_ = IPC_SERVER_STATE_ERROR;
}

void CrashGenerationServer::EnterStateWhenSignaled(IPCServerState state) {
  // Do not signal the event here; the pending asynchronous I/O will signal
  // it when it completes, and the state handler will run then.
  server_state_ = state;
}

void CrashGenerationServer::EnterStateImmediately(IPCServerState state) {
  server_state_ = state;

  // Signal the event explicitly so the state machine advances without
  // waiting for any I/O to complete.
  if (!SetEvent(overlapped_.hEvent)) {
    server_state_ = IPC_SERVER_STATE_ERROR;
  }
}

bool CrashGenerationServer::PrepareReply(const ClientInfo& client_info,
                                         ProtocolMessage* reply) const {
  reply->tag = MESSAGE_TAG_REGISTRATION_RESPONSE;
  reply->id = GetCurrentProcessId();

  if (CreateClientHandles(client_info, reply)) {
    return true;
  }

  // Closing of remote handles (belonging to a different process) can
  // only be done through DuplicateHandle.
  if (reply->dump_request_handle) {
    DuplicateHandle(client_info.process_handle(),  // hSourceProcessHandle
                    reply->dump_request_handle,    // hSourceHandle
                    NULL,                          // hTargetProcessHandle
                    0,                             // lpTargetHandle
                    0,                             // dwDesiredAccess
                    FALSE,                         // bInheritHandle
                    DUPLICATE_CLOSE_SOURCE);       // dwOptions
    reply->dump_request_handle = NULL;
  }

  if (reply->dump_generated_handle) {
    DuplicateHandle(client_info.process_handle(),  // hSourceProcessHandle
                    reply->dump_generated_handle,  // hSourceHandle
                    NULL,                          // hTargetProcessHandle
                    0,                             // lpTargetHandle
                    0,                             // dwDesiredAccess
                    FALSE,                         // bInheritHandle
                    DUPLICATE_CLOSE_SOURCE);       // dwOptions
    reply->dump_generated_handle = NULL;
  }

  if (reply->server_alive_handle) {
    DuplicateHandle(client_info.process_handle(),  // hSourceProcessHandle
                    reply->server_alive_handle,    // hSourceHandle
                    NULL,                          // hTargetProcessHandle
                    0,                             // lpTargetHandle
                    0,                             // dwDesiredAccess
                    FALSE,                         // bInheritHandle
                    DUPLICATE_CLOSE_SOURCE);       // dwOptions
    reply->server_alive_handle = NULL;
  }

  return false;
}

bool CrashGenerationServer::CreateClientHandles(const ClientInfo& client_info,
                                                ProtocolMessage* reply) const {
  HANDLE current_process = GetCurrentProcess();
  if (!DuplicateHandle(current_process,
                       client_info.dump_requested_handle(),
                       client_info.process_handle(),
                       &reply->dump_request_handle,
                       kDumpRequestEventAccess,
                       FALSE,
                       0)) {
    return false;
  }

  if (!DuplicateHandle(current_process,
                       client_info.dump_generated_handle(),
                       client_info.process_handle(),
                       &reply->dump_generated_handle,
                       kDumpGeneratedEventAccess,
                       FALSE,
                       0)) {
    return false;
  }

  if (!DuplicateHandle(current_process,
                       server_alive_handle_,
                       client_info.process_handle(),
                       &reply->server_alive_handle,
                       kMutexAccess,
                       FALSE,
                       0)) {
    return false;
  }

  return true;
}

bool CrashGenerationServer::RespondToClient(ClientInfo* client_info) {
  ProtocolMessage reply;
  if (!PrepareReply(*client_info, &reply)) {
    return false;
  }

  DWORD bytes_count = 0;
  bool success = WriteFile(pipe_,
                           &reply,
                           sizeof(reply),
                           &bytes_count,
                           &overlapped_) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  if (!success && error_code != ERROR_IO_PENDING) {
    return false;
  }

  // Takes over ownership of client_info. We MUST return true if AddClient
  // succeeds.
  return AddClient(client_info);
}

// The server thread servicing the clients runs this method. The method
// implements the state machine described in ReadMe.txt along with the
// helper methods HandleXXXState.
void CrashGenerationServer::HandleConnectionRequest() {
  // If the server is shutting down, get into ERROR state, reset the event so
  // more workers don't run and return immediately.
  if (shutting_down_) {
    server_state_ = IPC_SERVER_STATE_ERROR;
    ResetEvent(overlapped_.hEvent);
    return;
  }

  switch (server_state_) {
    case IPC_SERVER_STATE_ERROR:
      HandleErrorState();
      break;

    case IPC_SERVER_STATE_INITIAL:
      HandleInitialState();
      break;

    case IPC_SERVER_STATE_CONNECTING:
      HandleConnectingState();
      break;

    case IPC_SERVER_STATE_CONNECTED:
      HandleConnectedState();
      break;

    case IPC_SERVER_STATE_READING:
      HandleReadingState();
      break;

    case IPC_SERVER_STATE_READ_DONE:
      HandleReadDoneState();
      break;

    case IPC_SERVER_STATE_WRITING:
      HandleWritingState();
      break;

    case IPC_SERVER_STATE_WRITE_DONE:
      HandleWriteDoneState();
      break;

    case IPC_SERVER_STATE_READING_ACK:
      HandleReadingAckState();
      break;

    case IPC_SERVER_STATE_DISCONNECTING:
      HandleDisconnectingState();
      break;

    default:
      // This indicates that we added one more state without
      // adding handling code.
      assert(false);
      server_state_ = IPC_SERVER_STATE_ERROR;
      break;
  }
}

bool CrashGenerationServer::AddClient(ClientInfo* client_info) {
  HANDLE request_wait_handle = NULL;
  if (!RegisterWaitForSingleObject(&request_wait_handle,
                                   client_info->dump_requested_handle(),
                                   OnDumpRequest,
                                   client_info,
                                   INFINITE,
                                   kDumpRequestThreadFlags)) {
    return false;
  }

  client_info->set_dump_request_wait_handle(request_wait_handle);

  // OnClientEnd will be called when the client process terminates.
  HANDLE process_wait_handle = NULL;
  if (!RegisterWaitForSingleObject(&process_wait_handle,
                                   client_info->process_handle(),
                                   OnClientEnd,
                                   client_info,
                                   INFINITE,
                                   WT_EXECUTEONLYONCE)) {
    return false;
  }

  client_info->set_process_exit_wait_handle(process_wait_handle);

  // New scope to hold the lock for the shortest time.
  {
    AutoCriticalSection lock(&sync_);
    if (shutting_down_) {
      // If the server is shutting down, don't add new clients.
      return false;
    }
    clients_.push_back(client_info);
  }

  return true;
}

// static
void CALLBACK CrashGenerationServer::OnPipeConnected(void* context, BOOLEAN) {
  assert(context);

  CrashGenerationServer* obj =
      reinterpret_cast<CrashGenerationServer*>(context);
  obj->HandleConnectionRequest();
}

// static
void CALLBACK CrashGenerationServer::OnDumpRequest(void* context, BOOLEAN) {
  assert(context);
  ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);

  CrashGenerationServer* crash_server = client_info->crash_server();
  assert(crash_server);
  if (crash_server->pre_fetch_custom_info_) {
    client_info->PopulateCustomInfo();
  }
  crash_server->HandleDumpRequest(*client_info);

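  // Reset the dump request event so that the registered wait can fire again
  // for a subsequent dump request from this client.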
  ResetEvent(client_info->dump_requested_handle());
}

// static
void CALLBACK CrashGenerationServer::OnClientEnd(void* context, BOOLEAN) {
  assert(context);
  ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);

  CrashGenerationServer* crash_server = client_info->crash_server();
  assert(crash_server);

  crash_server->HandleClientProcessExit(client_info);
}

void CrashGenerationServer::HandleClientProcessExit(ClientInfo* client_info) {
  assert(client_info);

  // Must unregister the dump request wait operation and wait for any
  // dump requests that might be pending to finish before proceeding
  // with the client_info cleanup.
  client_info->UnregisterDumpRequestWaitAndBlockUntilNoPending();

  if (exit_callback_) {
    exit_callback_(exit_context_, client_info);
  }

  // Start a new scope to release lock automatically.
  {
    AutoCriticalSection lock(&sync_);
    if (shutting_down_) {
      // The crash generation server is shutting down and as part of the
      // shutdown process it will delete all clients from the clients_ list.
      return;
    }
    clients_.remove(client_info);
  }

  // Explicitly unregister the process exit wait using the non-blocking method.
  // Otherwise, the destructor will attempt to unregister it using the blocking
  // method, which will lead to a deadlock because it is being called from the
  // callback of the same wait operation.
  client_info->UnregisterProcessExitWait(false);

  delete client_info;
}

void CrashGenerationServer::HandleDumpRequest(const ClientInfo& client_info) {
  bool execute_callback = true;
  // Generate the dump only if it's explicitly requested by the
  // server application; otherwise the server might want to generate
  // the dump in the callback.
  std::wstring dump_path;
  if (generate_dumps_) {
    if (!GenerateDump(client_info, &dump_path)) {
      // The client process terminated or some other error occurred.
      execute_callback = false;
    }
  }

  if (dump_callback_ && execute_callback) {
    std::wstring* ptr_dump_path = (dump_path == L"") ? NULL : &dump_path;
    dump_callback_(dump_context_, &client_info, ptr_dump_path);
  }

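  // Signal the client that dump generation (and the callback, if any) has
  // completed, so its wait on the dump generated event returns.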
  SetEvent(client_info.dump_generated_handle());
}

bool CrashGenerationServer::GenerateDump(const ClientInfo& client,
                                         std::wstring* dump_path) {
  assert(client.pid() != 0);
  assert(client.process_handle());

  // We have to get the address of the EXCEPTION_POINTERS structure from
  // the client process address space.
  EXCEPTION_POINTERS* client_ex_info = NULL;
  if (!client.GetClientExceptionInfo(&client_ex_info)) {
    return false;
  }

  DWORD client_thread_id = 0;
  if (!client.GetClientThreadId(&client_thread_id)) {
    return false;
  }

  MinidumpGenerator dump_generator(dump_path_,
                                   client.process_handle(),
                                   client.pid(),
                                   client_thread_id,
                                   GetCurrentThreadId(),
                                   client_ex_info,
                                   client.assert_info(),
                                   client.dump_type(),
                                   true);
  if (!dump_generator.GenerateDumpFile(dump_path)) {
    return false;
  }
  return dump_generator.WriteMinidump();
}

}  // namespace google_breakpad