2017-03-29 22:06:16 LIANGJINHUAI 阅读数 2517

1.查看所有进程的内存使用情况

    指令:top


2.动态查看进程号为PID的进程的内存使用情况

    指令:top -d 1 -p PID

如下图是使用top -d 1 -p PID指令查看某进程内存使用情况的结果



3.内容解释:

PID:进程的ID

USER:进程所有者

PR:进程的优先级别,越小越优先被执行

NI:nice值,值越小优先级越高

VIRT:进程占用的虚拟内存

RES:进程占用的物理内存

SHR:进程使用的共享内存

S:进程的状态。S表示休眠,R表示正在运行,Z表示僵死状态,N表示该进程优先值为负数

%CPU:进程占用CPU的使用率

%MEM:进程使用的物理内存和总内存的百分比

TIME+:该进程启动后占用的总的CPU时间,即占用CPU使用时间的累加值。

COMMAND:启动进程的命令名称



2012-07-19 19:16:36 winlinvip 阅读数 4097

Linux多进程服务器真的很给力,赞一个!

Linux多进程一般是master负责侦听,worker接受和伺服client。

一个使用了以下技术的多进程模型:

1. sigset:安全信号,信号屏蔽和接受。

2. epoll:异步io模型。

master进程使用信号模型,侦听用户信号和程序信号,并和worker交流。它的主循环是sigsuspend。

worker进程使用事件模型,使用epoll_wait等待事件,同时他也接受信号(信号会中断epoll_wait)。

// mpserver —— multiple processes server
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;

#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include <string.h>
#include <errno.h>

// Print an error message and terminate the process.
// fix: wrapped in do/while(0) so the macro expands to a single statement --
// the original two-statement form would always exit() when used in an
// un-braced if/else.
#define err_exit(msg) do { cout << "[error] " << msg << endl; exit(1); } while (0)

// Command-line options for the server.
struct UserOptions{
    int port;           // TCP port to listen on
    int num_processes;  // worker count; 0 means single-process mode
};

// Parse <port> and <num_processes> from the command line into `options`.
// Prints the usage text and terminates when too few arguments are given.
void discovery_user_options(int argc, char** argv, UserOptions& options){
    const bool has_required_args = (argc > 2);
    if(!has_required_args){
        cout << "Usage: " << argv[0] << " <port> <num_processes>" << endl
            << "port: the port to listen" << endl
            << "num_processes: the num to start processes. if 0, use single process mode" << endl;
        exit(1);
    }
    
    const int parsed_port = atoi(argv[1]);
    const int parsed_workers = atoi(argv[2]);
    options.port = parsed_port;
    options.num_processes = parsed_workers;
}

// Create, configure, bind and listen a TCP socket on options.port.
// Returns the listening fd; terminates the process on any failure.
int listen_server_socket(UserOptions& options){
    int serverfd = socket(AF_INET, SOCK_STREAM, 0);
    
    if(serverfd == -1){
        err_exit("init socket error!");
    }
    cout << "init socket success! #" << serverfd << endl;
    
    // allow fast restart on the same port (skip TIME_WAIT).
    int reuse_socket = 1;
    if(setsockopt(serverfd, SOL_SOCKET, SO_REUSEADDR, &reuse_socket, sizeof(int)) == -1){
        err_exit("setsockopt reuse-addr error!");
    }
    cout << "setsockopt reuse-addr success!" << endl;
    
    // fix: zero the whole sockaddr_in -- sin_zero was left uninitialized.
    sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(options.port);
    addr.sin_addr.s_addr = INADDR_ANY;
    if(bind(serverfd, (const sockaddr*)&addr, sizeof(sockaddr_in)) == -1){
        err_exit("bind socket error!");
    }
    cout << "bind socket success!" << endl;
    
    if(listen(serverfd, 10) == -1){
        err_exit("listen socket error!");
    }
    cout << "listen socket success! " << options.port << endl;
    
    return serverfd;
}

// Shared handler for every registered signal.
// NOTE(review): cout/exit are not async-signal-safe; acceptable for a demo,
// but a production handler should only set flags or write() to a pipe.
void signal_handler(int signo){
    cout << "#" << getpid() << " get a signal:";
    
    bool do_exit = false;
    switch(signo){
        case SIGCHLD:
            cout << "SIGCHLD";
            // reap all exited children of our process group without blocking.
            int status;
            while(waitpid(0, &status, WNOHANG) > 0){
            }
            break;
        case SIGALRM:
            cout << "SIGALRM";
            break;
        case SIGIO:
            cout << "SIGIO";
            break;
        // INT/HUP/TERM/QUIT all request process termination below.
        case SIGINT:
            cout << "SIGINT";
            do_exit = true;
            break;
        case SIGHUP:
            cout << "SIGHUP";
            do_exit = true;
            break;
        case SIGTERM:
            cout << "SIGTERM";
            do_exit = true;
            break;
        case SIGQUIT:
            cout << "SIGQUIT";
            do_exit = true;
            break;
        case SIGUSR1:
            cout << "SIGUSR1";
            break;
        case SIGUSR2:
            cout << "SIGUSR2";
            break;
    }
    
    cout << "!" << endl;
    if(do_exit){
        exit(0);
    }
}

// Install `handler` for `signum` via sigaction.
// fix: the original never initialized sa_flags (the assignment was commented
// out) and left the rest of the struct uninitialized, so random SA_* flags
// could reach the kernel. Zero the whole struct first.
void register_signal_handler_imp(int signum, void (*handler)(int)){
    struct sigaction action;
    memset(&action, 0, sizeof(action));
    
    action.sa_handler = handler;
    sigemptyset(&action.sa_mask);
    action.sa_flags = 0;
    
    if(sigaction(signum, &action, NULL) == -1){
        err_exit("register signal handler failed!");
    }
}
// Route every signal this program cares about to the shared handler.
void register_signal_handler(){
    const int handled_signals[] = {
        SIGCHLD, SIGALRM, SIGIO, SIGINT, SIGHUP,
        SIGTERM, SIGQUIT, SIGUSR1, SIGUSR2
    };
    const int count = (int)(sizeof(handled_signals) / sizeof(handled_signals[0]));
    for(int i = 0; i < count; i++){
        register_signal_handler_imp(handled_signals[i], signal_handler);
    }
    cout << "register signal handler success!" << endl;
}

// Block every signal of interest in the master, so they are delivered
// only inside sigsuspend (and are inherited as blocked by workers).
void block_specified_signals(){
    const int blocked_signals[] = {
        SIGCHLD, SIGALRM, SIGIO, SIGINT, SIGHUP,
        SIGTERM, SIGQUIT, SIGUSR1, SIGUSR2
    };
    
    sigset_t set;
    sigemptyset(&set);
    const int count = (int)(sizeof(blocked_signals) / sizeof(blocked_signals[0]));
    for(int i = 0; i < count; i++){
        sigaddset(&set, blocked_signals[i]);
    }
    
    if(sigprocmask(SIG_BLOCK, &set, NULL) == -1){
        err_exit("sigprocmask block signal failed!");
    }
    cout << "sigprocmask block signal success!" << endl;
}

// Drop the signal mask the worker inherited from the master across fork().
void unblock_all_signals(){
    sigset_t set;
    sigemptyset(&set);
    
    // the master process block all signals, we unblock all for we use epoll to wait events.
    if(sigprocmask(SIG_SETMASK, &set, NULL) == -1){
        // fix: the log messages said "block" although this function unblocks
        // (matches the wording mpserver2 already uses).
        err_exit("sigprocmask unblock signal failed!");
    }
    cout << "sigprocmask unblock signal success!" << endl;
}

// Register `fd` with the epoll instance `ep` for level-triggered reads.
void epoll_add_event(int ep, int fd){
    epoll_event ev;
    ev.events = EPOLLIN;
    ev.data.fd = fd;
    const int rc = epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);
    if(rc == -1){
        err_exit("epoll add event failed!");
    }
    cout << "epoll add fd success: #" << fd << endl;
}
// Unregister `fd` from the epoll instance `ep`.
// EPOLL_CTL_DEL ignores the event argument, so NULL is passed.
void epoll_remove_event(int ep, int fd){
    const int rc = epoll_ctl(ep, EPOLL_CTL_DEL, fd, NULL);
    if(rc == -1){
        err_exit("epoll remove event failed! fd=" << fd << ", errno=0x" << hex << errno << dec << " " << strerror(errno));
    }
    cout << "epoll remove fd success: #" << fd << endl;
}

// Drop a dead client: log it, unregister from epoll, then close the socket.
void close_client(int epfd, int clientfd){
    cout << "the client dead, remove it: #" << clientfd << endl;
    epoll_remove_event(epfd, clientfd);
    close(clientfd);
}

// Handle one epoll event in the worker: a new connection on the listening
// socket, a hangup/error on a client, or ping/pong traffic on a client.
void worker_get_event(int ep, int serverfd, epoll_event& active){
    const int fd = active.data.fd;
    
    // server listening socket event: take the pending connection.
    if(fd == serverfd){
        int client = accept(serverfd, NULL, 0);
        if(client == -1){
            err_exit("accept client socket error!");
        }
        cout << "get a client: #" << client << endl;
        epoll_add_event(ep, client);
        return;
    }
    
    // client hangup or socket error: drop the client.
    if((active.events & (EPOLLHUP | EPOLLERR)) != 0){
        cout << "get a EPOLLHUP or EPOLLERR event from client #" << fd << endl;
        close_client(ep, fd);
        return;
    }
    
    // anything other than readable is ignored.
    if((active.events & EPOLLIN) == 0){
        return;
    }
    
    // readable: consume the client's message...
    cout << "get a EPOLLIN event from client #" << fd << endl;
    char buf[1024];
    memset(buf, 0, sizeof(buf));
    if(recv(fd, buf, sizeof(buf), 0) <= 0){
        close_client(ep, fd);
        return;
    }
    cout << "recv from client: " << buf << endl;
    
    // ...and answer with the ping payload (sizeof includes the '\0').
    char msg[] = "hello, server ping!";
    if(send(fd, msg, sizeof(msg), 0) <= 0){
        close_client(ep, fd);
        return;
    }
}

// Event loop of a worker process: unblock signals, create an epoll
// instance, watch the shared listening socket, and serve events until
// epoll_wait is interrupted (returns -1, e.g. by a delivered signal).
void worker_process_cycle(int serverfd){
    cout << "start worker process cycle" << endl;
    // the blocked mask was inherited from the master across fork(); drop it
    // so signals can interrupt epoll_wait below.
    unblock_all_signals();

    int ep = epoll_create(1024);
    if(ep == -1){
        err_exit("epoll_create failed!");
    }
    cout << "epoll create success!" << endl;
    // all workers watch the same inherited listening fd.
    epoll_add_event(ep, serverfd);
    
    for(;;){
        epoll_event events[1024];
        int incoming = epoll_wait(ep, events, 1024, -1);
        if(incoming == -1){
            // interrupted by a signal or failed: leave the loop.
            break;
        }
        for(int i = 0; i < incoming; i++){
            worker_get_event(ep, serverfd, events[i]);
        }
    }
    
    cout << "worker process exited" << endl;
    close(ep);
}

// Fork one worker process. The child runs the event loop and exits;
// the parent only logs the new pid.
void start_worker_process(int serverfd){
    const pid_t child = fork();
    
    if(child == -1){
        err_exit("fork process failed");
    }
    
    if(child == 0){
        // child: serve clients forever, then exit cleanly.
        worker_process_cycle(serverfd);
        exit(0);
    }
    
    // parent (master).
    cout << "fork process success: #" << child << endl;
}

// Master entry point: install signal handling, block the signals so they
// are only delivered inside sigsuspend, start workers, then wait.
int main(int argc, char** argv){
    register_signal_handler();
    block_specified_signals();
    
    //sleep(3);
    UserOptions options;
    discovery_user_options(argc, argv, options);
    // create the listening socket before fork so workers inherit the fd.
    int serverfd = listen_server_socket(options);
    
    for(int i = 0; i < options.num_processes; i++){
        start_worker_process(serverfd);
    }
    // num_processes == 0: single-process mode, the master serves clients itself.
    if(options.num_processes == 0){
        worker_process_cycle(serverfd);
    }
    
    // master main loop: atomically clear the mask and sleep until a signal
    // arrives; process exit happens inside signal_handler for INT/HUP/TERM/QUIT.
    sigset_t set;
    sigemptyset(&set);
    for(;;){
        sigsuspend(&set);
    }

    return 0;
}

// mpclient —— multiple processes client
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;

#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

// Print an error message and terminate the process.
// fix: wrapped in do/while(0) so the macro expands to a single statement --
// the original two-statement form would always exit() when used in an
// un-braced if/else.
#define err_exit(msg) do { cout << "[error] " << msg << endl; exit(1); } while (0)

// Command-line options for the client.
struct UserOptions{
    char* server_ip;    // dotted-quad server address (points into argv)
    int port;           // server TCP port
    int num_processes;  // number of connections to open (name kept from server)
};

// Parse <server_ip> <port> <num_processes> into `options`.
// Prints the usage text and terminates when too few arguments are given.
void discovery_user_options(int argc, char** argv, UserOptions& options){
    // fix: three arguments are consumed below (argv[1..3]), but the original
    // only required two (argc <= 2), so "prog ip port" read past argv.
    if(argc <= 3){
        cout << "Usage: " << argv[0] << " <server_ip> <port> <num_processes>" << endl
            << "server_ip: the ip address of server" << endl
            << "port: the port to connect at" << endl
            << "num_processes: the num to start processes. if 0, use single process mode" << endl;
        exit(1);
    }
    
    options.server_ip = argv[1];
    options.port = atoi(argv[2]);
    options.num_processes = atoi(argv[3]);
}

// Open a TCP connection to options.server_ip:options.port.
// Returns the connected fd; terminates the process on failure.
int connect_server_socket(UserOptions& options){
    int clientfd = socket(AF_INET, SOCK_STREAM, 0);
    
    if(clientfd == -1){
        err_exit("init socket error!");
    }
    cout << "init socket success!" << endl;
    
    // fix: zero the whole sockaddr_in -- sin_zero was left uninitialized.
    sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(options.port);
    addr.sin_addr.s_addr = inet_addr(options.server_ip);
    if(connect(clientfd, (const sockaddr*)&addr, sizeof(sockaddr_in)) == -1){
        err_exit("connect socket error!");
    }
    cout << "connect socket success!" << endl;
    
    return clientfd;
}

// Client entry point: open num_processes connections, then ping each
// server in a round-robin loop until any send/recv fails.
int main(int argc, char** argv){
    UserOptions options;
    discovery_user_options(argc, argv, options);
    
    // fix: with num_processes == 0 the original spun forever in an empty
    // busy loop; treat it as "nothing to do" and exit cleanly.
    if(options.num_processes <= 0){
        cout << "no connection to make, exit." << endl;
        return 0;
    }
    
    int* fds = new int[options.num_processes];
    for(int i = 0; i < options.num_processes; i++){
        fds[i] = connect_server_socket(options);
    }
    
    bool do_loop = true;
    while(do_loop){
        for(int i = 0; i < options.num_processes; i++){
            int fd = fds[i];
            // ping the server; sizeof(msg) includes the trailing '\0'.
            char msg[] = "hello, client ping!";
            if(send(fd, msg, sizeof(msg), 0) <= 0){
                close(fd);
                do_loop = false;
                break;
            }
            // read the server's pong.
            char buf[1024];
            memset(buf, 0, sizeof(buf));
            if(recv(fd, buf, sizeof(buf), 0) <= 0){
                close(fd);
                do_loop = false;
                break;
            }
            cout << "recv from server: " << buf << endl;
            
            sleep(3);
        }
    }
    
    delete[] fds;

    return 0;
}


master启动的主要过程:

1. 注册信号处理函数:sigaction。

2. 屏蔽所有关心的信号,让内核暂时保留信号,不要发给程序,因为这时候程序不希望被系统打断。

3. 侦听端口。

4. 启动worker进程。

5. 等待信号的到来:sigsuspend。


worker启动的主要过程:

1. 取消信号的屏蔽。因为这时候worker拷贝的是master的内存结构,所以信号屏蔽也拷贝过来了。若不取消,则无法收到退出消息,只能重启服务器或它自己退出才能杀这个进程了。

2. 创建epoll。

3. 将侦听的server_socket_fd添加到epoll。

4. 等待epoll事件的到来:epoll_wait。或者信号也能中断这个epoll_wait:返回-1.


典型的多进程服务器模型如下图所示:


考虑一个RTMP服务器,假设和FMS类似以vhost为边界启动worker,但是client应该连接到指定的worker。所以这个典型的模型就需要修改了。

有一种方式是master接受连接,handshake,然后接收到client的connect请求后,知道连接的vhost是哪个,再把这个client fd转发给worker。如下图所示:


但这种方式master过于复杂,它实际上需要做worker要做的事情。多进程稳定的一个原因是进程的自然边界,worker进程崩溃不会影响其他worker和master,如果中心master崩溃,那么系统就完蛋了。所以master要尽量简单不能有过多的逻辑。另外,单进程和多进程切换的一个条件,就是worker能用一个函数表达,单进程时直接调用这个函数,而多进程时fork后调用这个函数;若master要混合worker的功能,单进程时也需要过多交互。

改进如下图:


master变得简单,worker功能也单一了。worker发现不是自己的client时,将client发给master,由master分发给特定的worker。单进程时没有分发这一步了;多进程时master也只需要分发fd,而不需要处理协议的逻辑。

在master和worker之间传递fd不是一个好办法,因为过多频繁的SIGIO可能导致“I/O possible”;google上找了一下说是由于信号SIGIO没有处理引起,但明显已经侦听了handler;有人说是signal queue overflow,恩,这个很像!

socketpair有时还会引起“[write_channel] sendmsg error, ret=-1, errno=0xb Resource temporarily unavailable”错误。管道错误。一般发生在worker进程中,如果多个worker进程都向master发送数据,而master处理一个数据后sleep或者做其他的事情去了,就会造成这种情况(master的socketpair缓冲区满了)。

master若需要和worker使用socketpair通信,master使用信号来实现异步,当worker的socketpair有SIGIO信号时,说明worker在发数据,如何判断是哪个worker发的呢?可以用select,当然不太好;可以使用F_SETSIG来更改默认的SIGIO,改为SIGRTMIN+fd,当worker有数据时,收到的信号就包含了fd了。

获取SIGIO的fd的另外一个方法,是使用siginfo_t取得fd。使用F_SETSIG可以设置的SIG较少,SIGRTMAX为64,所以最多可用32个自定义实时信号,一般用来对SIGIO分类。必须将SIGIO定义为实时信号,然后在sigaction时设置SA_SIGINFO和使用void (*handler)(int signo, siginfo_t* info, void* context),info->si_fd就是SIGIO的fd。

以下是一个糅合了多进程、信号、socketpair、passing fd的一个原型,较稳定的一个原型:

// mpserver2 —— multiple processes server
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
using namespace std;

#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>

// Log prefixed with the current pid.
#define info(msg) cout << "#" << getpid() << " " << msg
// Log the error plus errno, then terminate.
// fix: wrapped in do/while(0) so the macro expands to a single statement,
// safe inside an un-braced if/else.
#define err_exit(msg) do { info("[error] " << msg << ", errno=0x" << hex << errno << dec << " " << strerror(errno) << endl); exit(-1); } while (0)

// define the SIGIO to RTSig, the SIGIO now means rt-sig queue full.
// fix: parenthesized -- an unparenthesized SIGRTMIN+1 can silently
// mis-expand inside a larger expression.
#define SIG_SOCKET_IO (SIGRTMIN+1)

// set by the signal handler when a terminating signal arrives.
bool global_terminate = false;
// set by the signal handler when the channel rt-signal arrives.
bool global_channel_msg = false;
// when global_channel_msg is true, the current_active_io_fd set to the fd.
int current_active_io_fd = -1;

// number of fds this worker currently has registered with epoll.
int current_client_num = 0;
// whether the listening fd is currently in this worker's epoll set.
bool accepting = true;

// the num of client for a process to serve.
// 2+N
// 2: the serverfd, the channelfd
// N: clients.
// fix: parenthesized for the same expansion-safety reason as above.
#define max_clients_per_process (2+1000)
// the physical max clients, if exceed this value, error!
#define physical_max_clients 1024

#define max_workers 100

// Command-line options for the server.
struct UserOptions{
    int port;           // TCP port to listen on
    int num_processes;  // worker count; 0 means single-process mode
};

// Parse <port> and <num_processes> from the command line into `options`.
// Prints the usage text and terminates when too few arguments are given.
void discovery_user_options(int argc, char** argv, UserOptions& options){
    const bool has_required_args = (argc > 2);
    if(!has_required_args){
        cout << "Usage: " << argv[0] << " <port> <num_processes>" << endl
            << "port: the port to listen" << endl
            << "num_processes: the num to start processes. if 0, use single process mode" << endl
            << "for example: " << argv[0] << " 1990 20" << endl;
        exit(1);
    }
    
    const int parsed_port = atoi(argv[1]);
    const int parsed_workers = atoi(argv[2]);
    options.port = parsed_port;
    options.num_processes = parsed_workers;
}

// channel_msg.command values: a passed client fd, or a status report.
#define CMD_FD 100
#define CMD_INFO 200
// Message exchanged between master and worker over the socketpair channel.
struct channel_msg{
    int command; // CMD_FD (an fd rides along via SCM_RIGHTS) or CMD_INFO.
    int fd; // the fd, set to -1 if no fd.
    int total_client; // the total clients number of specified process.
    int param; // the param.
};
// Send one channel_msg over the socketpair `sock`. When data->fd != -1,
// the fd is attached as SCM_RIGHTS ancillary data so the peer process
// receives a duplicate of the descriptor. Terminates on sendmsg failure.
void write_channel(int sock, channel_msg* data, int size)
{
    msghdr msg;
    
    // ancillary buffer for SCM_RIGHTS fd passing.
    // fix: this union must live at function scope -- the original declared
    // it inside the else-branch, so msg.msg_control pointed at a dead
    // object by the time sendmsg() ran (undefined behavior).
    union {
        struct cmsghdr cm;
        char space[CMSG_SPACE(sizeof(int))];
    } cmsg;
    
    // init msg_control
    if(data->fd == -1){
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
    }
    else{
        memset(&cmsg, 0, sizeof(cmsg));
        
        cmsg.cm.cmsg_level = SOL_SOCKET;
        cmsg.cm.cmsg_type = SCM_RIGHTS; // we are sending fd.
        cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
        
        msg.msg_control = (cmsghdr*)&cmsg;
        msg.msg_controllen = sizeof(cmsg);
        *(int *)CMSG_DATA(&cmsg.cm) = data->fd;
    }
    
    // init msg_iov: the payload is the channel_msg struct itself.
    iovec iov[1];
    iov[0].iov_base = data;
    iov[0].iov_len  = size;
    
    msg.msg_iov = iov;
    msg.msg_iovlen = 1;
    
    // init msg_name: unused on a connected socketpair.
    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_flags = 0;
    
    info("[write_channel][info] data fd=" << data->fd << ", command=" 
        << data->command << ", total_client=" << data->total_client 
        << ", param=" << data->param << endl);
        
    while(true){
        ssize_t ret;
        if ((ret = sendmsg(sock, &msg, 0)) <= 0){
            // errno = 0xb Resource temporarily unavailable
            // donot retry, for it's no use, error infinite loop.
            err_exit("[write_channel] sendmsg error, ret=" << ret);
        }
        break;
    }
}
// Receive one channel_msg from the socketpair `sock`; if it carries
// CMD_FD, extract the passed file descriptor from the SCM_RIGHTS
// ancillary data into data->fd. Terminates on error; exits 0 on EOF.
void read_channel(int sock, channel_msg* data, int size)
{
    msghdr msg;
    
    // msg_iov: the payload is the channel_msg struct itself.
    iovec iov[1];
    iov[0].iov_base = data;
    iov[0].iov_len = size;
    
    msg.msg_iov = iov;
    msg.msg_iovlen  = 1;
    
    // msg_name: unused on a connected socketpair.
    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    
    // msg_control
    union { // union to create a 8B aligned memory.
        struct cmsghdr cm; // 16B = 8+4+4
        char space[CMSG_SPACE(sizeof(int))]; // 24B = 16+4+4
    } cmsg;
    memset(&cmsg, 0, sizeof(cmsg));
    
    msg.msg_control = (cmsghdr*)&cmsg;
    msg.msg_controllen = sizeof(cmsg);
    
    ssize_t ret;
    if ((ret = recvmsg(sock, &msg, 0)) == -1) {
        err_exit("[read_channel] recvmsg error");
    }
    if(ret == 0){
        // peer closed its end of the channel: nothing more will arrive.
        info("[read_channel] connection closed" << endl);
        exit(0);
    }
    
    info("[read_channel][info] data fd=" << data->fd << ", command=" 
        << data->command << ", total_client=" << data->total_client 
        << ", param=" << data->param << endl);
    if(data->command == CMD_FD){
        // the kernel replaced the sender's fd number with our local one.
        // NOTE(review): only 0/1 are rejected here; fd 2 (stderr) would pass.
        int fd = *(int *)CMSG_DATA(&cmsg.cm);
        if(fd == 0 || fd == 1){
            err_exit("[read_channel] recvmsg invalid fd=" << fd 
                << ", data->fd=" << data->fd << ", command=" << data->command
                << ", ret=" << ret);
        }
        data->fd = fd;
    }
    else{
        data->fd = -1;
    }
}

// Switch `fd` into non-blocking mode via the FIONBIO ioctl.
void set_noblocking(int fd){
    int enable = 1;
    const int rc = ioctl(fd, FIONBIO, &enable);
    if(rc == -1){
        err_exit("[master] ioctl failed!");
    }
    info("[master] server socket no-block flag set success" << endl);
}

// Create, configure (reuse-addr, non-blocking), bind and listen a TCP
// socket on options.port. Returns the fd; terminates on any failure.
// The socket is non-blocking so accept() can report EAGAIN to workers.
int listen_server_socket(UserOptions& options){
    int serverfd = socket(AF_INET, SOCK_STREAM, 0);
    
    if(serverfd == -1){
        err_exit("[master] init socket error!");
    }
    info("[master] init socket success! #" << serverfd << endl);
    
    int reuse_socket = 1;
    if(setsockopt(serverfd, SOL_SOCKET, SO_REUSEADDR, &reuse_socket, sizeof(int)) == -1){
        err_exit("[master] setsockopt reuse-addr error!");
    }
    info("[master] setsockopt reuse-addr success!" << endl);
    
    set_noblocking(serverfd);
    
    // fix: zero the whole sockaddr_in -- sin_zero was left uninitialized.
    sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(options.port);
    addr.sin_addr.s_addr = INADDR_ANY;
    if(bind(serverfd, (const sockaddr*)&addr, sizeof(sockaddr_in)) == -1){
        err_exit("[master] bind socket error!");
    }
    info("[master] bind socket success!" << endl);
    
    if(listen(serverfd, 10) == -1){
        err_exit("[master] listen socket error!");
    }
    info("[master] listen socket success! " << options.port << endl);
    
    return serverfd;
}

// both master and worker will invoke this handler.
// NOTE(review): the parameter is named `info`, same as the info(msg) log
// macro -- the function-like macro only expands on `info(...)` calls, so
// `info->si_fd` still refers to the parameter, but the name is fragile.
// NOTE(review): cout is not async-signal-safe; acceptable for this demo.
void signal_handler(int signo, siginfo_t* info, void* context){
    info("#" << getpid() << " get a signal:");
    
    // the realtime channel signal: record which channel fd is readable.
    // si_fd is presumably populated because the fd's signal was remapped
    // with fcntl(F_SETSIG) -- confirm against the channel setup code.
    if(signo == SIG_SOCKET_IO){
        cout << "SIG_SOCKET_IO!" << endl;
        global_channel_msg = true;
        current_active_io_fd = info->si_fd;
        info("[master] get SIGIO: fd=" << info->si_fd << endl);
        return;
    }
    
    switch(signo){
        case SIGCHLD:
            // NOTE(review): unlike mpserver, no waitpid() here -- exited
            // children are not reaped; confirm whether zombies are intended.
            cout << "SIGCHLD";
            break;
        case SIGALRM:
            cout << "SIGALRM";
            break;
        case SIGIO:
            // plain SIGIO now means the realtime signal queue overflowed.
            cout << "SIGIO";
            info("[master][warning] ignore the SIGIO: rt-sig queue is full!" << endl);
            break;
        // INT/HUP/TERM/QUIT only raise a flag; the main loops exit later.
        case SIGINT:
            cout << "SIGINT";
            global_terminate = true;
            break;
        case SIGHUP:
            cout << "SIGHUP";
            global_terminate = true;
            break;
        case SIGTERM:
            cout << "SIGTERM";
            global_terminate = true;
            break;
        case SIGQUIT:
            cout << "SIGQUIT";
            global_terminate = true;
            break;
        case SIGUSR1:
            cout << "SIGUSR1";
            break;
        case SIGUSR2:
            cout << "SIGUSR2";
            break;
    }
    
    cout << "!" << endl;
}

// Install the SA_SIGINFO-style `handler` for `signum` via sigaction.
// fix: zero the struct first -- fields other than the ones set below were
// previously passed to the kernel uninitialized.
void register_signal_handler_imp(int signum, void (*handler)(int, siginfo_t*, void*)){
    struct sigaction action;
    memset(&action, 0, sizeof(action));
    
    // SA_SIGINFO selects the three-argument handler (gives us siginfo_t).
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = handler;
    sigemptyset(&action.sa_mask);
    
    if(sigaction(signum, &action, NULL) == -1){
        err_exit("[master] register signal handler failed!");
    }
}
// Route every signal of interest (including the realtime channel signal)
// to the shared SA_SIGINFO handler.
void register_signal_handler(){
    const int handled_signals[] = {
        SIGCHLD, SIGALRM, SIGIO, SIG_SOCKET_IO, SIGINT,
        SIGHUP, SIGTERM, SIGQUIT, SIGUSR1, SIGUSR2
    };
    const int count = (int)(sizeof(handled_signals) / sizeof(handled_signals[0]));
    for(int i = 0; i < count; i++){
        register_signal_handler_imp(handled_signals[i], signal_handler);
    }
    info("[master] register signal handler success!" << endl);
}

// Block every signal of interest in the master so they are delivered only
// inside sigsuspend (workers inherit and later clear this mask).
void block_specified_signals(){
    const int blocked_signals[] = {
        SIGCHLD, SIGALRM, SIGIO, SIG_SOCKET_IO, SIGINT,
        SIGHUP, SIGTERM, SIGQUIT, SIGUSR1, SIGUSR2
    };
    
    sigset_t set;
    sigemptyset(&set);
    const int count = (int)(sizeof(blocked_signals) / sizeof(blocked_signals[0]));
    for(int i = 0; i < count; i++){
        sigaddset(&set, blocked_signals[i]);
    }
    
    if(sigprocmask(SIG_BLOCK, &set, NULL) == -1){
        err_exit("[master] sigprocmask block signal failed!");
    }
    info("[master] sigprocmask block signal success!" << endl);
}

// Install an empty mask wholesale, dropping the blocked set the worker
// inherited from the master across fork().
void unblock_all_signals(){
    sigset_t empty_mask;
    sigemptyset(&empty_mask);
    
    // the master process block all signals, we unblock all for we use epoll to wait events.
    const int rc = sigprocmask(SIG_SETMASK, &empty_mask, NULL);
    if(rc == -1){
        err_exit("[worker] sigprocmask unblock signal failed!");
    }
    info("[worker] sigprocmask unblock signal success!" << endl);
}

// Register `fd` with epoll for level-triggered reads and count it toward
// this worker's client budget.
void epoll_add_event(int ep, int fd){
    epoll_event ev;
    ev.events = EPOLLIN;
    ev.data.fd = fd;
    const int rc = epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);
    if(rc == -1){
        err_exit("[worker] epoll add event failed!");
    }
    ++current_client_num;
    info("[worker] epoll add fd success: #" << fd << ", current_client_num=" << current_client_num << endl);
}
// Unregister `fd` from epoll and release its slot in the client budget.
// EPOLL_CTL_DEL ignores the event argument, so NULL is passed.
void epoll_remove_event(int ep, int fd){
    const int rc = epoll_ctl(ep, EPOLL_CTL_DEL, fd, NULL);
    if(rc == -1){
        err_exit("[worker] epoll remove event failed! fd=" << fd << ", errno=0x" << hex << errno << dec << " " << strerror(errno));
    }
    --current_client_num;
    info("[worker] epoll remove fd success: #" << fd << ", current_client_num=" << current_client_num << endl);
}

// Unregister and close a dead client fd, optionally reporting the updated
// client count to the master. do_report=false is used right after sending
// a CMD_FD message, which already carries total_client.
void close_client(int ep, int fd, int serverfd, int worker_channel, bool do_report = true){
    info("[worker] the client dead, remove it: #" << fd << endl);
    epoll_remove_event(ep, fd);
    close(fd);
    
    if(serverfd == fd){
        info("[worker] warning to close the server fd" << endl);
    }
    
    // start accept again.
    // NOTE(review): accepting resumes below max_clients_per_process (1002)
    // but worker_get_event stops accepting at physical_max_clients (1024);
    // the two thresholds look inconsistent -- confirm which is intended.
    if(!accepting && current_client_num < max_clients_per_process){
        epoll_add_event(ep, serverfd);
        accepting = true;
    }
    
    // notice the master: worker can take more.
    if(do_report){
        channel_msg msg;
        msg.command = CMD_INFO;
        msg.total_client = current_client_num;
        msg.fd = -1;
        msg.param = 0;
        write_channel(worker_channel, &msg, sizeof(channel_msg));
    }
}

// Dispatch one epoll event for a worker process. Three event classes:
//   - serverfd readable: accept a new client (or stop accepting when full);
//   - worker_channel readable: a message from the master, possibly a passed fd;
//   - client fd: HUP/ERR drops it, EPOLLIN serves the ping protocol and
//     forwards mis-routed clients back to the master via CMD_FD.
void worker_get_event(int ep, int serverfd, epoll_event& active, int worker_channel){
    info("[worker] process #" << getpid() << " current_client_num=" << current_client_num << ", worker_channel=" << worker_channel << endl);
    int fd = active.data.fd;
    
    // server listening socket event.
    if(fd == serverfd){
        if(current_client_num < physical_max_clients){
            int client = accept(serverfd, NULL, 0);
            if(client == -1){
                // thundering herd
                // http://www.citi.umich.edu/projects/linux-scalability/reports/accept.html
                if(errno == EAGAIN || errno == EWOULDBLOCK){
                    info("[worker][warning] get a thundering herd at #" << getpid() << endl);
                    return;
                }
                err_exit("[worker] accept client socket error!");
            }
            
            int reuse_socket = 1;
            if(setsockopt(client, SOL_SOCKET, SO_REUSEADDR, &reuse_socket, sizeof(int)) == -1){
                err_exit("[worker] setsockopt reuse-addr error!");
            }
            info("[worker] setsockopt reuse-addr success!" << endl);
            
            info("[worker] process #" << getpid() << " get a client: #" << client << endl);
            epoll_add_event(ep, client);
        }
        else{
            // overloaded: drop the listening fd so other workers accept.
            epoll_remove_event(ep, fd); // donot accept.
            accepting = false;
            info("[worker] warning, the worker #" << getpid() 
                << " exceed the max clients, current_client_num=" << current_client_num << endl);
        }
        return;
    }
    
    // channel event: a message (possibly a passed client fd) from the master.
    if(fd == worker_channel){
        channel_msg msg;
        read_channel(worker_channel, &msg, sizeof(channel_msg));
        info("[worker] get a channel message. fd=" << msg.fd << ", command=" << msg.command
            << ", current_client_num=" << current_client_num << ", max_clients_per_process=" << max_clients_per_process
            << ", physical_max_clients=" << physical_max_clients << endl);
        if(msg.command == CMD_FD && msg.fd != -1){
            if(msg.fd == 0 || msg.fd == 1){
                info("[worker][warning] ignore the invalid passing fd=" << msg.fd << endl);
                return;
            }
            // accept all client from master, util the read overloaded.
            if(current_client_num < physical_max_clients){
                epoll_add_event(ep, msg.fd);
                info("[worker] get a dispatched client. #" << msg.fd << endl);
            }
            else{
                info("[worker][warning] worker overloaded, close the fd: " << msg.fd << endl);
                close(msg.fd);
            }
        }
        else{
            err_exit("[worker] get a invalid channel");
        }
        return;
    }
    
    // client event.
    if((active.events & EPOLLHUP) == EPOLLHUP || (active.events & EPOLLERR) == EPOLLERR){
        info("[worker] get a EPOLLHUP or EPOLLERR event from client #" << fd << endl);
        close_client(ep, fd, serverfd, worker_channel);
        return;
    }
    if((active.events & EPOLLIN) == EPOLLIN){
        info("[worker] get a EPOLLIN event from client #" << fd << endl);
        char ch_control; // we MUST not read more bytes, for the fd maybe passing to other process!
        if(recv(fd, &ch_control, sizeof(char), 0) <= 0){
            close_client(ep, fd, serverfd, worker_channel);
            return;
        }
        // check the first byte, if 'M' it's message, if 'C' it's control message.
        if(ch_control == 'C'){
            int client_required_id; //worker_channel
            if(recv(fd, &client_required_id, sizeof(int), 0) <= 0){
                close_client(ep, fd, serverfd, worker_channel);
                return;
            }
        
            pid_t pid = getpid();
            info("[worker] get client control message: client_required_id=" << client_required_id << endl);
            if((client_required_id % pid) == 0){
                info("[worker][verified] client exactly required this server" << endl);
            }
            else{
                // never send the follows:
                if(fd == 0 || fd == 1 || fd == serverfd || fd == worker_channel){
                    info("[worker][warning] ignore invalid fd to passing: " << fd << endl);
                    close_client(ep, fd, serverfd, worker_channel);
                    return;
                }
                info("[worker] send to master to dispatch it" << endl);
                channel_msg msg;
                msg.fd = fd;
                msg.command = CMD_FD;
                msg.total_client = current_client_num;
                msg.param = client_required_id;
                write_channel(worker_channel, &msg, sizeof(channel_msg));
                // donot report to master, for the CMD_FD message has reported.
                close_client(ep, fd, serverfd, worker_channel, false);
                return;
            }
        }
        else{
            // fix: buf was streamed to the log without a guaranteed NUL
            // terminator; zero it and keep one byte spare for the '\0'.
            char buf[1024];
            memset(buf, 0, sizeof(buf));
            if(recv(fd, buf, sizeof(buf) - 1, 0) <= 0){
                close_client(ep, fd, serverfd, worker_channel);
                return;
            }
            info("[worker] get client data message: " << buf << endl);
        }
        // answer with the ping payload (sizeof includes the '\0').
        char msg[] = "hello, server ping!";
        if(send(fd, msg, sizeof(msg), 0) <= 0){
            close_client(ep, fd, serverfd, worker_channel);
            return;
        }
        return;
    }
}

// Event loop of a worker (or of the master in single-process mode).
// worker_channel is this process's socketpair end to the master, or -1
// when running single-process.
void worker_process_cycle(int serverfd, int worker_channel){
    cout << "[worker] start worker process cycle. serverfd=" << serverfd 
        << ", worker_channel=" << worker_channel 
        << ((worker_channel == -1)? "(single process)" : "(multiple processes)")<< endl;
    unblock_all_signals();

    int ep = epoll_create(1024);
    if(ep == -1){
        err_exit("[worker] epoll_create failed!");
    }
    info("[worker] epoll create success!" << endl);
    epoll_add_event(ep, serverfd);
    // fix: in single-process mode worker_channel is -1; registering it made
    // epoll_ctl fail (EBADF) and err_exit killed the process.
    if(worker_channel != -1){
        epoll_add_event(ep, worker_channel);
    }
    
    // worker use epoll event, the signal handler will break the epoll_wait to return -1.
    for(;;){
        epoll_event events[1024];
        int incoming = epoll_wait(ep, events, 1024, -1);
        
        // get a event or error.
        if(incoming == -1){
            // a signal interrupted the wait; honor a pending terminate flag.
            if(global_terminate){
                close(worker_channel);
                close(serverfd);
                info("[worker] worker process exit" << endl);
                exit(0);
            }
            break;
        }
        
        for(int i = 0; i < incoming; i++){
            worker_get_event(ep, serverfd, events[i], worker_channel);
        }
    }
    
    info("[worker] worker process exited" << endl);
    close(ep);
}

// Master-side bookkeeping for one forked worker.
struct WorkerProcess{
    pid_t pid;       // child pid returned by fork
    int channel;     // master's end of the socketpair to this worker
    int num_clients; // last client count the worker reported
};

// Fork one worker process. The master keeps fds[0] of a socketpair as the
// channel to the worker (recorded in worker_process); the child keeps fds[1]
// and runs worker_process_cycle. The channel is configured BEFORE fork so the
// master receives SIG_SOCKET_IO when the worker writes to it. On any setup
// failure the whole process exits via err_exit.
void start_worker_process(int serverfd, WorkerProcess& worker_process){
    // master/worker use domain socket to communicate.
    int fds[2];
    if(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == -1){
        err_exit("[master] sockpair create domain socket failed");
    }
    info("[master] sockpair create domain socket success! [" << fds[0] << ", " << fds[1] << "]" << endl);
    
    // master process, use the fds[0].
    worker_process.num_clients = 0;
    worker_process.channel = fds[0];
    
    /**
    * SIGIO (http://www.lindevdoc.org/wiki/SIGIO)
    * The SIGIO signal is sent to a process to notify it about some event on a file descriptor, namely:
    *   the file descriptor is ready to receive data (previous write has finished).
    *   the file descriptor has new data to read
    *   an error has occurred
    * This signal is not sent by default; it must be enabled by specifying the O_ASYNC flag to the descriptor
    *
    * O_ASYNC (http://www.lindevdoc.org/wiki/O_ASYNC)
    * The O_ASYNC flag specifies that the process that owns a data stream (by default the process that opened it, 
    * but can be changed with fcntl() with the F_SETOWN command) will receive a signal (SIGIO by default, but can 
    * be changed with fcntl() with the F_SETSIG command) when the file is ready for reading or writing.
    */
    // set the socket owner to master, to get a SIGIO signal.
    // NOTE: must happen before fork so the signal is routed to the master pid.
    if(fcntl(worker_process.channel, F_SETOWN, getpid()) == -1){
        err_exit("[master] fcntl F_SETOWN to master failed");
    }
    info("[master] fcntl F_SETOWN to master success" << endl);
    // set socket to async to singal SIGIO when data coming.
    int on = 1;
    if(ioctl(worker_process.channel, FIOASYNC, &on) == -1){
        err_exit("[master] ioctl set FIOASYNC failed");
    }
    info("[master] ioctl set FIOASYNC success" << endl);
    /** F_SETSIG
    * if donot use set_sig, we must use select to find which fd is read.
    * http://ajaxxx.livejournal.com/62378.html
    * Linux has the additional feature of fcntl(F_SETSIG), which lets you get the file descriptor that generated the 
    * signal back as part of the signal information. So I implemented this, with the obvious semantics: receiving a 
    * SIGIO would call the handler for just that file descriptor and then return.
    * remark: the SIGRTMIN=34, SIGRTMAX=62, so only 28 fd cannbe discoveried. so it's used for category signal, 
    *       and we use SA_SIGINFO for sigaction to get the fd of SIGIO. 
    */
    // replace the default SIGIO with the project's realtime signal so the
    // handler can recover which fd fired via SA_SIGINFO.
    if(fcntl(worker_process.channel, F_SETSIG, SIG_SOCKET_IO) == -1){
        err_exit("[master] fcntl set rtsig failed: F_SETSIG.");
    }
    info("[master] fcntl set rtsig success" << endl);
    // set to unblocking.
    set_noblocking(worker_process.channel);
    set_noblocking(fds[1]);
    
    // start process by fork.
    pid_t pid = fork();
    
    if(pid == -1){
        err_exit("[master] fork process failed");
    }
    
    // worker process, use the fds[1].
    // the child closes the master end and never returns from the cycle.
    if(pid == 0){
        close(fds[0]);
        worker_process_cycle(serverfd, fds[1]);
        exit(0);
    }
    // master process.
    // the master closes the worker end; only the channel (fds[0]) remains.
    worker_process.pid = pid;
    close(fds[1]);
    
    info("[master] fork process success: #" << pid << ", channel=" << worker_process.channel << endl);
}

// Choose the worker that should serve client_required_id and return the
// master-side channel fd used to pass the client fd to it. pre_pid is the
// worker the client currently sits on; it is excluded so a re-dispatch always
// moves the connection. If every worker is full, a new worker is started
// (up to max_workers), otherwise the master aborts.
int find_the_required_worker(int serverfd, vector<WorkerProcess>& workers, int client_required_id, int pre_pid){
    // first pass: the "exact" worker, i.e. the one whose pid divides the id.
    WorkerProcess* target = NULL;
    for(vector<WorkerProcess>::iterator ite = workers.begin(); ite != workers.end(); ++ite){
        WorkerProcess& worker = *ite;
        if((client_required_id % worker.pid) == 0){
            target = &worker;
            break;
        }
    }
    // second pass: the most idling worker. As in the original code, a viable
    // idle worker overrides the exact match found above.
    // fix: the previous sentinel (pre_num == 0) collided with a worker that
    // legitimately has 0 clients, letting a later, busier worker steal the
    // slot; -1 now means "no candidate yet".
    int pre_num = -1;
    for(vector<WorkerProcess>::iterator ite = workers.begin(); ite != workers.end(); ++ite){
        WorkerProcess& worker = *ite;
        if(worker.num_clients < max_clients_per_process && worker.pid != pre_pid){
            if(pre_num == -1 || worker.num_clients < pre_num){
                target = &worker;
                pre_num = worker.num_clients;
            }
        }
    }
    
    if(target == NULL){
        if(workers.size() < max_workers){
            // all existing workers overloaded: start a new worker and retry.
            WorkerProcess worker;
            start_worker_process(serverfd, worker);
            workers.push_back(worker);
            
            return find_the_required_worker(serverfd, workers, client_required_id, pre_pid);
        }
        else{
            err_exit("[master] all " << workers.size() << " workers overloaded!");
        }
    }
    info("[master] find a target to passing fd, pid=" << target->pid << ", num_clients=" << target->num_clients << endl);
    target->num_clients ++;
    
    return target->channel;
}
// Handle one message arriving on a worker's channel. CMD_FD asks the master
// to re-dispatch a client fd to another worker; CMD_INFO merely refreshes the
// sender's num_clients (done below for every message).
void on_channel_message(int serverfd, WorkerProcess& host, vector<WorkerProcess>& workers){
    int sock = host.channel;
    
    channel_msg msg;
    read_channel(sock, &msg, sizeof(channel_msg));
    // every message carries the sender's current load.
    host.num_clients = msg.total_client;
    
    cout << "[master] get a channel msg, pid=#" << host.pid << ", fd=" << msg.fd 
        << ", total_client=" << msg.total_client
        << ", param=" << msg.param << ", command=" << msg.command << endl;
    if(msg.command == CMD_FD){
        // fix: validate the fd BEFORE choosing a target worker; previously an
        // invalid fd still ran find_the_required_worker, whose num_clients
        // increment then leaked because of the early return.
        if(msg.fd == 0 || msg.fd == 1){
            info("[master][warning] ignore invalid fd to passing: " << msg.fd << endl);
            return;
        }
        // dispatch and re-dispatch fd
        int channel = find_the_required_worker(serverfd, workers, msg.param, host.pid);
        msg.command = CMD_FD;
        write_channel(channel, &msg, sizeof(channel_msg));
        // direct close the fd, the target worker MUST take it or close it.
        close(msg.fd);
        host.num_clients--;
    }
    else if(msg.command == CMD_INFO){
        return;
    }
    else{
        err_exit("[master] invalid response!");
    }
}

// Master entry point. In single-process mode the master itself runs the
// worker event loop; in multiple-processes mode it forks the workers and then
// sits in a pure signal loop (sigsuspend), reacting to the global flags set
// by the signal handlers registered in register_signal_handler().
int main(int argc, char** argv){
    info("[master] SIGRTMIN=0x" << hex << SIGRTMIN 
        << ", SIGRTMAX=0x" << hex << SIGRTMAX << dec 
        << ", SIGIO=0x" << hex << SIGIO << dec 
        << ", SIGPOLL=0x" << hex << SIGPOLL << dec 
        << endl);
    register_signal_handler();
    block_specified_signals();
    
    //sleep(3);
    UserOptions options;
    discovery_user_options(argc, argv, options);
    int serverfd = listen_server_socket(options);
    
    // single process mode
    // -1 tells worker_process_cycle there is no master channel.
    if(options.num_processes == 0){
        worker_process_cycle(serverfd, -1);
    }
    // multiple processes mode
    else{
        vector<WorkerProcess> workers;
        for(int i = 0; i < options.num_processes; i++){
            WorkerProcess worker;
            start_worker_process(serverfd, worker);
            workers.push_back(worker);
        }
        
        // the empty set for sigsuspend
        // (empty mask = atomically unblock everything while waiting)
        sigset_t set;
        sigemptyset(&set);
        // master process use signal only.
        for(;;){
            // blocks until any signal handler has run.
            sigsuspend(&set);
            
            if(global_terminate){
                // kill all workers.
                for(vector<WorkerProcess>::iterator ite = workers.begin(); ite != workers.end(); ++ite){
                    WorkerProcess& worker = *ite;
                    kill(worker.pid, SIGTERM);
                }
                // wait for workers to exit.
                for(vector<WorkerProcess>::iterator ite = workers.begin(); ite != workers.end(); ++ite){
                    WorkerProcess& worker = *ite;
                    int status;
                    waitpid(worker.pid, &status, 0);
                }
                info("[master] all worker terminated, master exit" << endl);
                exit(0);
            }
            if(global_channel_msg){
                // snapshot-and-reset the handler-written globals first, so a
                // signal arriving during processing is not lost silently.
                int active_fd = current_active_io_fd;
                current_active_io_fd = -1;
                global_channel_msg = false;
                
                // read each channel.
                // (only the channel matching the fd reported by the SIGIO
                // handler is serviced.)
                for(vector<WorkerProcess>::iterator ite = workers.begin(); ite != workers.end(); ++ite){
                    WorkerProcess& worker = *ite;
                    if(worker.channel == active_fd){
                        on_channel_message(serverfd, worker, workers);
                        break;
                    }
                }
            }
        }
    }

    return 0;
}

// mpclient2 —— multiple processes client
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;

#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

// Print an error and abort. Wrapped in do{...}while(0) so the macro expands
// to a single statement (safe after an unbraced if/else; previously only the
// cout was guarded by the condition).
#define err_exit(msg) do { cout << "[error] " << msg << endl; exit(1); } while(0)

// Command-line options of the test client.
struct UserOptions{
    char* server_ip;   // server address (points into argv, not owned)
    int port;          // server port to connect to
    int num_clients;   // number of concurrent client sockets
    int sleep_ms;      // pause between ping rounds, milliseconds

    // fix: default-constructed instances previously carried indeterminate
    // values; zero-initialize so unparsed options are well-defined.
    UserOptions() : server_ip(0), port(0), num_clients(0), sleep_ms(0) {}
};

// Parse and validate the client command line into options; prints usage and
// exits on any invalid argument.
void discovery_user_options(int argc, char** argv, UserOptions& options){
    if(argc <= 4){
        cout << "Usage: " << argv[0] << " <server_ip> <port> <num_clients> <sleep_ms>" << endl
            << "server_ip: the ip address of server" << endl
            << "port: the port to connect at" << endl
            << "num_clients: the num to start client." << endl
            << "sleep_ms: the time to sleep in ms." << endl
            << "for example: " << argv[0] << " 127.0.0.1 1990 1000 500" << endl;
        exit(1);
    }
    
    options.server_ip = argv[1];
    options.port = atoi(argv[2]);
    options.num_clients = atoi(argv[3]);
    options.sleep_ms = atoi(argv[4]);
    // fix: atoi silently yields 0 for garbage; reject values that would make
    // connect/usleep misbehave instead of failing later with odd errors.
    if(options.port <= 0 || options.port > 65535){
        err_exit("port must be in (0, 65535].");
    }
    if(options.num_clients <= 0){
        err_exit("num_clients must > 0.");
    }
    if(options.sleep_ms < 0){
        err_exit("sleep_ms must be >= 0.");
    }
}

// Create a TCP socket and connect it to options.server_ip:options.port.
// Returns the connected fd; exits the process on any failure.
int connect_server_socket(UserOptions& options){
    int clientfd = socket(AF_INET, SOCK_STREAM, 0);
    
    if(clientfd == -1){
        err_exit("init socket error!");
    }
    cout << "init socket success!" << endl;
    
    sockaddr_in addr;
    // fix: zero the whole struct; sin_zero was previously left indeterminate.
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(options.port);
    addr.sin_addr.s_addr = inet_addr(options.server_ip);
    // fix: inet_addr reports a malformed dotted-quad as INADDR_NONE, which
    // previously went on to connect to 255.255.255.255.
    if(addr.sin_addr.s_addr == INADDR_NONE){
        err_exit("invalid server ip address!");
    }
    if(connect(clientfd, (const sockaddr*)&addr, sizeof(sockaddr_in)) == -1){
        err_exit("connect socket error!");
    }
    cout << "connect socket success!" << endl;
    
    return clientfd;
}

// Test client: opens num_clients connections, registers each with a control
// message ('C' + random required id), then loops forever pinging every
// connection ('M' + payload) and printing the server's reply.
int main(int argc, char** argv){
    srand(0); // fixed seed: the required_id sequence is reproducible per run
    UserOptions options;
    discovery_user_options(argc, argv, options);
    int* fds = new int[options.num_clients];
    for(int i = 0; i < options.num_clients; i++){
        fds[i] = connect_server_socket(options);
        usleep(10 * 1000); // to prevent the SIGIO error: maybe signal queue overflow!
    }
    
    // registration round: one control message per connection.
    for(int i = 0; i < options.num_clients; i++){
        int fd = fds[i];
        char c = 'C'; // control message
        if(send(fd, &c, sizeof(char), 0) <= 0){
            close(fd);
            err_exit("send message failed!");
        }
        int required_id = rand();
        if(send(fd, &required_id, sizeof(int), 0) <= 0){
            close(fd);
            err_exit("send message failed!");
        }
    }
    
    bool do_loop = true;
    while(do_loop){
        int ret = 0;
        for(int i = 0; i < options.num_clients; i++){
            int fd = fds[i];
            // data message: 'M' + a ping payload.
            char c = 'M';
            ret = send(fd, &c, sizeof(char), 0);
            if(ret <= 0){
                cout << "send control message to server error" << endl;
                close(fd);
                fds[i] = -1; // mark closed so the final cleanup skips it
                do_loop = false;
                break;
            }
            char msg[] = "client, ping message!";
            ret = send(fd, msg, sizeof(msg), 0);
            if(ret <= 0){
                cout << "send control value to server error" << endl;
                close(fd);
                fds[i] = -1;
                do_loop = false;
                break;
            }
            
            char buf[1024];
            memset(buf, 0, sizeof(buf));
            // fix: read at most sizeof(buf)-1 so the buffer is always
            // NUL-terminated before being streamed to cout, even when the
            // peer fills it completely.
            ret = recv(fd, buf, sizeof(buf) - 1, 0);
            if(ret <= 0){
                cout << "recv from server error" << endl;
                close(fd);
                fds[i] = -1;
                do_loop = false;
                break;
            }
            cout << "recv from server: " << buf << endl;
            
            usleep(options.sleep_ms * 1000);
        }
    }
    
    // fix: close the sockets that are still open before releasing the array;
    // previously every surviving fd leaked until process exit.
    for(int i = 0; i < options.num_clients; i++){
        if(fds[i] != -1){
            close(fds[i]);
        }
    }
    delete[] fds;

    return 0;
}


2020-03-10 21:25:39 Fweiren 阅读数 28

由于云服务器的内存只有1G,以至于服务器进程随着时间推移占用内存过高,导致网站数据加载不起来,甚至宕机,所以为了优化,我们需要找出哪些进程占用大量内存。

top 命令

一、按进程的内存使用率排序

运行 top 命令后,键入大写 M。有两种途径:

  • a) 打开大写键盘的情况下,直接按M键
  • b) 未打开大写键盘的情况下,Shift+M键

二、按进程的CPU使用率排序

运行 top 命令后,键入大写 P。有两种途径:

  • a) 打开大写键盘的情况下,直接按P键
  • b) 未打开大写键盘的情况下,Shift+P键
列名 解释
PID 进程id
USER 进程所有者的用户名
PR 优先级
NI nice值。负值表示高优先级,正值表示低优先级
VIRT 进程使用的虚拟内存总量,单位kb。VIRT=SWAP+RES
RES 进程使用的、未被换出的物理内存大小,单位kb。RES=CODE+DATA
SHR 共享内存大小,单位kb
S 进程状态。D=不可中断的睡眠状态 R=运行 S=睡眠 T=跟踪/停止 Z=僵尸进程
%CPU 上次更新到现在的CPU时间占用百分比
%MEM 进程使用的物理内存百分比
TIME+ 进程使用的CPU时间总计,单位1/100秒
COMMAND 命令名/命令行

ps 命令

  • 查看占用内存最高的10个进程
ps aux | sort -k4nr | head -n 10
  • 查看占用CPU最高的10个进程
ps aux | sort -k3nr | head -n 10

free 命令

free -h

参考

top命令按内存和cpu排序

2017-02-06 11:27:00 J080624 阅读数 472

【1】基本介绍

在LINUX中,每个执行的程序(代码)都称为一个进程。每一个进程都分配一个ID号。每一个进程,都会对应一个父进程,而这个父进程可以复制多个子进程。例如www服务器。

每个进程都可能以两种方式存在的:前台与后台。所谓前台进程就是用户目前的屏幕上可以进行操作的。后台进程则是实际在操作,但由于屏幕上无法看到的进程,通常使用后台方式执行。

一般系统的服务都是以后台进程的方式存在,而且都会常驻在系统中,直到关机才结束。

其实服务本质就是后台进程。

服务(service) 本质就是进程,但是是运行在后台的,通常都会监听某个端口,等待其它程序的请求,比如(mysql , sshd 防火墙等),因此我们又称为守护进程,是Linux中非常重要的知识点

如下所示是Windows任务管理器下进程任务查看图:
在这里插入图片描述

linux下查看进行参考博文ps 命令查看系统进程

【2】终止进程

① kill

kill命令用来删除执行中的程序或工作。kill可将指定的信息送至程序。预设的信息为SIGTERM(15),可将指定程序终止。若仍无法终止该程序,可使用SIGKILL(9)信息尝试强制删除程序。程序或工作的编号可利用ps指令或job指令查看。

语法

kill(选项)(参数)

选项

-a:当处理当前进程时,不限制命令名和进程号的对应关系;
-l <信息编号>:若不加<信息编号>选项,则-l参数会列出全部的信息名称;
-p:指定kill 命令只打印相关进程的进程号,而不发送任何信号;
-s <信息名称或编号>:指定要送出的信息;
-u:指定用户。

参数

进程或作业识别号:指定要删除的进程或作业。


② killall

killall命令使用进程的名称来杀死进程,使用此指令可以杀死一组同名进程。我们可以使用kill命令杀死指定进程PID的进程,如果要找到我们需要杀死的进程,我们还需要在之前使用ps等命令再配合grep来查找进程,而killall把这两个过程合二为一,是一个很好用的命令。

语法

killall(选项)(参数)

选项

-e:对长名称进行精确匹配;
-I:忽略大小写的不同;
-p:杀死进程所属的进程组;
-i:交互式杀死进程,杀死进程前需要进行确认;
-l:打印所有已知信号列表;
-q:如果没有进程被杀死。则不输出任何信息;
-r:使用正规表达式匹配要杀死的进程名称;
-s:用指定的信号代替默认信号“SIGTERM”;
-u:杀死指定用户的进程。

参数

进程名称:指定要杀死的进程名称。

实例

杀死所有同名进程

killall vi

【3】top命令监控服务器状态

top命令可以实时动态地查看系统的整体运行情况,是一个综合了多方信息监测系统性能和运行信息的实用工具。通过top命令所提供的互动式界面,用热键可以管理。

语法

top(选项)

选项

-b:以批处理模式操作;
-c:显示完整的命令;
-d:屏幕刷新间隔时间;
-I:忽略失效过程;
-s:保密模式;
-S:累积模式;
-i<时间>:设置间隔时间;
-u<用户名>:指定用户名;
-p<进程号>:指定进程;
-n<次数>:循环显示的次数。

top交互命令

在top命令执行过程中可以使用的一些交互命令。这些命令都是单字母的,如果在命令行中使用了-s选项, 其中一些命令可能会被屏蔽。

h:显示帮助画面,给出一些简短的命令总结说明;
k:终止一个进程;
i:忽略闲置和僵死进程,这是一个开关式命令;
q:退出程序;
r:重新安排一个进程的优先级别;
S:切换到累计模式;
s:改变两次刷新之间的延迟时间(单位为s),如果有小数,就换算成ms。输入0值则系统将不断刷新,默认值是5s;
f或者F:从当前显示中添加或者删除项目;
o或者O:改变显示项目的顺序;
l:切换显示平均负载和启动时间信息;
m:切换显示内存信息;
t:切换显示进程和CPU状态信息;
c:切换显示命令名称和完整命令行;
M:根据驻留内存大小进行排序;
P:根据CPU使用百分比大小进行排序;
N:根据PID大小排序;
T:根据时间/累计时间进行排序;
w:将当前设置写入~/.toprc文件中。

实例

top - 09:44:56 up 16 days, 21:23,  1 user,  load average: 9.59, 4.75, 1.92
Tasks: 145 total,   2 running, 143 sleeping,   0 stopped,   0 zombie
Cpu(s): 99.8%us,  0.1%sy,  0.0%ni,  0.2%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
Mem:   4147888k total,  2493092k used,  1654796k free,   158188k buffers
Swap:  5144568k total,       56k used,  5144512k free,  2013180k cached

实例说明

  • top - 09:44:56[当前系统时间],
  • 16 days[系统已经运行了16天],
  • 1 user[个用户当前登录],
  • load average: 9.59, 4.75, 1.92[系统负载,即任务队列的平均长度]
  • Tasks: 145 total[总进程数],
  • 2 running[正在运行的进程数],
  • 143 sleeping[睡眠的进程数],
  • 0 stopped[停止的进程数],
  • 0 zombie[僵尸进程数],
  • Cpu(s): 99.8%us[用户空间占用CPU百分比],
  • 0.1%sy[内核空间占用CPU百分比],
  • 0.0%ni[用户进程空间内改变过优先级的进程占用CPU百分比],
  • 0.2%id[空闲CPU百分比], 0.0%wa[等待输入输出的CPU时间百分比],
  • 0.0%hi[硬中断占用CPU的时间百分比],
  • 0.0%st[虚拟机环境下被宿主机/其他虚拟机占用(偷取)的CPU时间百分比],
  • Mem: 4147888k total[物理内存总量],
  • 2493092k used[使用的物理内存总量],
  • 1654796k free[空闲内存总量],
  • 158188k buffers[用作内核缓存的内存量]
  • Swap: 5144568k total[交换区总量],
  • 56k used[使用的交换区总量],
  • 5144512k free[空闲交换区总量],
  • 2013180k cached[用作页面缓存的内存量],

参考博文:Linux - 管道(|)和grep 命令

2019-09-15 14:16:18 AlluRe_AmoR 阅读数 75

Linux服务器使用(一)

1、进程

快捷键:

Ctrl+C
终止当前执行程序
Ctrl+Z
把前台正在运行的程序挂起并暂停
Ctrl+D
退出当前shell
Ctrl+S
挂起当前shell
Ctrl+Q
解冻挂起的shell,不可行就重新连接打开一个终端,reboot linux 或 kill 相关进程。

指令:

bg 将刚挂起的命令放到后台运行(比如:Ctrl+Z之后的)
bg %5 将第5个job放到后台运行
fg 将刚挂起的命令调到前台运行
fg %5 将第5个job调到前台运行
jobs 可以查看当前用户下后台正执行哪些程序
jobs -l 可以显示程序的PID
kill %5 杀死第5个job
也可以 kill (+PID)

后台执行

可以用 Command xxxx & 即直接在指令后加&,就可以将程序后台执行
之后可以加bg fg 配合使用

但是这样会导致若关闭Shell,后台的程序的全部停止运行
所以,有以下两种方法:screen 和 nohup。

screen

1、安装screen命令
yum install screen #安装
2、创建screen会话
screen -S py1 #创建screen会话,自定义screen虚拟终端的名称,py1可以改为你想要的名称
创建会话后自动进入该虚拟终端
3、测试
我们可以执行一个服务,然后关闭SSH远程连接窗口,然后重新登录
screen -r #查看之前的虚拟终端

建立screen: screen -S py1 或者 screen
暂时离开screen会话窗口:Ctrl +A 再按D即可。此时服务器后台仍执行程序,即使远程断开也不影响。
想返回时,screen -r
假如同时开了多个,输入screen -r,会返回多个screen,选择要返回的screen的pid,即screen -r screenID
screen -ls #可以查看所有screen会话的
exit 和 Ctrl +C #要在screen中退出screen
#在Shell中不进入screen删除screen有以下两种:
screen -wipe py1
screen -S screenID -X quit # 需要先知道想删除的screen的ID

nohup

使用&命令后,作业被提交到后台运行,当前控制台没有被占用,但是一但把当前控制台关掉(退出帐户时),作业就会停止运行。nohup命令可以在你退出帐户之后继续运行相应的进程。nohup就是不挂起的意思( no hang up ).该命令的一般形式为:

nohup command &

如果使用nohup命令提交作业,那么在缺省情况下该作业的所有输出都被重定向到一个名为nohup.out的文件中,除非另外指定了输出文件

使用了nohup之后,不能理所当然不管了,因为有可能在当前账户非正常退出或者结束的时候,命令还是结束了。所以在使用nohup命令后台运行命令之后,需要使用exit正常退出当前账户,这样才能保证命令一直在后台运行。

没有更多推荐了,返回首页