  • MPI

    2021-03-23 22:27:57
  • mpi

    2018-10-31 14:46:52
    https://blog.csdn.net/yanxiangtianji/article/details/55255318 (a fix for MPI choosing the wrong network card/subnet); https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/index.html#ncclknobs ...
  • mpi-examples: MPI example programs
  • mpi programming

    2019-02-19 12:45:10
    Practical MPI Programming; MPI Programming Guide; Parallel Programming in OpenMP
  • Microsoft MPI and the MPI SDK

    2021-03-19 20:44:37
    As titled: the latest Microsoft MPI installer together with its SDK package.
  • MPI 01: MPI basics

    2019-07-03 11:09:49

    01 Basic information

    Message Passing Interface (MPI) is an application programming interface (API) standard for parallel computing. There are two main implementations, OpenMPI and MPICH, and the MPI standard itself has now reached version 4.0.
    Key resources:
    MPI Forum: https://www.mpi-forum.org/
    OpenMPI: https://www.open-mpi.org/
    MPICH: http://www.mpich.org/
    https://mpitutorial.com/tutorials/mpi-hello-world/zh_cn/
    Microsoft's documentation:
    https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi
    The rest of this post walks through single-machine use of Microsoft MPI (MS-MPI) on Windows 10.

    02 Setting up a single-machine MS-MPI environment

    02.01 MS-MPI installation packages for Windows

    From the download page at http://www.mpich.org/, pick the latest MS-MPI v10.0 release (https://www.microsoft.com/en-us/download/details.aspx?id=57467).
    The download page offers two installers: the development SDK (msmpisdk.msi) and the runtime environment (msmpisetup.exe).
    1 SDK installation
    msmpisdk.msi installs by default into C:\Program Files (x86)\Microsoft SDKs\MPI, which holds the headers and libraries needed for development:
    C:\Program Files (x86)\Microsoft SDKs\MPI\Include
    C:\Program Files (x86)\Microsoft SDKs\MPI\Lib\x64
    C:\Program Files (x86)\Microsoft SDKs\MPI\Lib\x86
    2 Runtime installation
    msmpisetup.exe installs by default into C:\Program Files\Microsoft MPI:
    C:\Program Files\Microsoft MPI\Bin\mpiexec.exe
    C:\Program Files\Microsoft MPI\Bin\msmpilaunchsvc.exe
    C:\Program Files\Microsoft MPI\Bin\smpd.exe
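
    To confirm the runtime works, a quick smoke test is to let mpiexec launch a trivial program; hostname here is just the standard Windows utility, not an MPI program:

    mpiexec.exe -n 4 hostname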

    02.02 Setting up a development environment with CMake 3.14.5 and VS2019

    1 Create a directory for the source code:
    D:\git\cpp\cpp_demo\MPI
    2 Create the CMake configuration file for the exercise project:
    D:\git\cpp\cpp_demo\MPI\CMakeLists.txt

    cmake_minimum_required(VERSION 3.2)
    if(CMAKE_VERSION VERSION_LESS 3.2)
      return()
    endif()
    
    set(proname "MPI")
    
    project("MPI")
    
    add_executable(${proname}
      src/MPI01.01.h
      src/MPI01.01.cpp
      src/MPI_main.cpp)
    
    if(WIN32)
      if(MSVC)
        # Extra compile options can be enabled here if needed
        target_compile_options(${proname} PRIVATE
          #/openmp
          #/wd4996
          #/wd4503
          #/std:c++17
        )
        if(CMAKE_SIZEOF_VOID_P EQUAL 8)
          set(_lib_suffix x64)
        else()
          set(_lib_suffix x86)
        endif()
        # MS-MPI SDK installation path
        set(mpi_path "C:/Program Files (x86)/Microsoft SDKs/MPI" CACHE STRING "mpi include path" FORCE)
        include_directories("${mpi_path}/include")
        # Add the MS-MPI library directory to the linker search path
        set_target_properties(${proname}
          PROPERTIES
          LINK_FLAGS /LIBPATH:"${mpi_path}/Lib/${_lib_suffix}"
        )
        # Link against the MS-MPI import library (msmpi.lib)
        target_link_libraries(${proname} msmpi)
      endif()
      # g++
      # SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-elide-constructors")
    endif()
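    
    Note: CMake also ships a FindMPI module, so a shorter alternative to the hand-written paths above (assuming CMake 3.9 or newer, which defines the imported target MPI::MPI_CXX) would be:
    
    find_package(MPI REQUIRED)
    target_link_libraries(${proname} MPI::MPI_CXX)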
    

    C++ files:
    D:\git\cpp\cpp_demo\MPI\src\MPI01.01.h

    #pragma once
    
    void test01_01();
    

    D:\git\cpp\cpp_demo\MPI\src\MPI01.01.cpp

    #include "MPI01.01.h"
    #include "mpi.h"
    
    #include <iostream>
    
    void test01_01() {
        // 初始化 MPI 环境
        MPI_Init(NULL, NULL);
    
        // 通过调用以下方法来得到所有可以工作的进程数量
        int world_size;
        MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    
        // 得到当前进程的秩
        int world_rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    
        // 得到当前进程的名字
        char processor_name[MPI_MAX_PROCESSOR_NAME];
        int name_len;
        MPI_Get_processor_name(processor_name, &name_len);
    
        // 打印一条带有当前进程名字,秩以及
        // 整个 communicator 的大小的 hello world 消息。
        std::cout << "Hello world from processor " << processor_name << ", rank " 
            << world_rank << " out of " << world_size << " processors" << std::endl;
    
        // 释放 MPI 的一些资源
        MPI_Finalize();
    }
    

    D:\git\cpp\cpp_demo\MPI\src\MPI_main.cpp

    #include "MPI01.01.h"
    int main(int argc, char** argv) {
        test01_01();
        return 0;
    }
    

    3 Generate the project files
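    This step can also be done from the command line (assuming CMake 3.14.5 on the PATH and the VS2019 x64 generator):
    
    cd D:\git\cpp\cpp_demo\MPI
    mkdir build
    cd build
    cmake -G "Visual Studio 16 2019" -A x64 ..
    cmake --build . --config Debug
    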
    4 Running the program
    mpiexec.exe -n 8 mpi.exe
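    With 8 processes on one machine the output should resemble the lines below (rank order varies between runs; HOSTNAME stands for the local machine name):
    
    Hello world from processor HOSTNAME, rank 2 out of 8 processors
    Hello world from processor HOSTNAME, rank 0 out of 8 processors
    ...
    Hello world from processor HOSTNAME, rank 5 out of 8 processors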

    03 Introductory tutorials

    https://mpitutorial.com/tutorials/
    MPI tutorial introduction (https://mpitutorial.com/tutorials/mpi-introduction/zh_cn/)
    MPI Hello World(https://mpitutorial.com/tutorials/mpi-hello-world/zh_cn/)
    MPI Send and Receive(https://mpitutorial.com/tutorials/mpi-send-and-receive/zh_cn/)
    MPI Scatter, Gather, and Allgather(https://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/zh_cn/)

  • This code tests MPI collective communication; depending on the case it uses MPI_Alltoall or MPI_Alltoallv.
  • Utilizing the MPI Fortran interfaces "use mpi" and "use mpi_f08" does not match "mpif.h" when I am using MPI_Waitall and give MPI_STATUS_IGNORE as an input ...
  • MPI backend

    2021-01-01 07:43:36
    Some people claim that MPI+MPI with MPI-3 is the way to go. It could be interesting to think about implementing an MPI backend for Kokkos to progress the discussion. (This question comes from an open-source project...)
  • mpi22-report.pdf
  • MPI support

    2020-12-02 01:39:42
    This PR adds MPI support to Shifter. The user can activate the MPI support either through the --mpi CLI option of Shifter: srun -N 2 shifter --mpi --image=...
  • In a Linux environment, uses a properly configured MPI library to run multiple processes concurrently. A first small MPI exercise, suitable for MPI beginners. Note: it must be compiled and run under Linux. Compile...
  • Examples of MPI_Bcast and MPI_Gather

    2018-01-01 20:40:57
    An example testing MPI collective communication, covering MPI_Bcast and MPI_Gather.
  • MPI_Allgather and MPI_Allgatherv

    If we need to gather data across processes, there are the following two choices. As I understand it, the difference is: MPI_Allgatherv lets the number of elements differ per process, and you set each process's count and displacement (offset) yourself, whereas with MPI_Allgather every process passes an array of the same length.
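    For reference, the C prototypes of the two calls as specified by the MPI standard are:
    
    int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      MPI_Comm comm);
    
    int MPI_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       void *recvbuf, const int recvcounts[], const int displs[],
                       MPI_Datatype recvtype, MPI_Comm comm);
    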
    For concrete usage, see the following two posts:
    MPI学习-MPI_Gather and MPI_Allgather
    MPI_Allgatherv函数讲解

    Below is an MPI_Allgather example (each worker sends 3 values):

    /**
     * Allgather example: every process ends up with the full gathered result,
     * as if a gather of the same data were performed once with each process
     * as the root.
     */
    // #include "stdafx.h"
    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>
    
    #define N 3
    
    int main(int argc, char *argv[])
    {
        int i, myrank, nprocs;
        int *send_buffer;
        int *recv_buffer;
    
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
        MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    
        send_buffer = new int[N];
    
        for (i = 0; i < N; i++)
            send_buffer[i] = myrank * 10 + i;
    
        // On every process, recv_buffer must be big enough to hold the
        // send_buffer contents of all processes combined.
        recv_buffer = new int[nprocs * N];
    
        MPI_Allgather(send_buffer, N, MPI_INT,
            recv_buffer, N, MPI_INT, MPI_COMM_WORLD);
    
        for (i = 0; i < nprocs * N; i++)
            fprintf(stderr, "myrank = %d, recv_buffer[%d] = %d\n", myrank, i, recv_buffer[i]);
    
        fprintf(stderr, "\n");
        delete[] recv_buffer;
        delete[] send_buffer;
        MPI_Finalize();
        return 0;
    }
    

    Build and run on Linux (file name: test3.cc):
    Compile: mpicxx -g -Wall -o test3.o test3.cc
    Run: mpirun -n 4 ./test3.o
    The output looks like this:

    myrank = 0, recv_buffer[0] = 0
    myrank = 0, recv_buffer[1] = 1
    myrank = 0, recv_buffer[2] = 2
    myrank = 0, recv_buffer[3] = 10
    myrank = 0, recv_buffer[4] = 11
    myrank = 0, recv_buffer[5] = 12
    myrank = 0, recv_buffer[6] = 20
    myrank = 0, recv_buffer[7] = 21
    myrank = 0, recv_buffer[8] = 22
    myrank = 0, recv_buffer[9] = 30
    myrank = 0, recv_buffer[10] = 31
    myrank = 0, recv_buffer[11] = 32
    
    myrank = 1, recv_buffer[0] = 0
    myrank = 1, recv_buffer[1] = 1
    myrank = 1, recv_buffer[2] = 2
    myrank = 1, recv_buffer[3] = 10
    myrank = 1, recv_buffer[4] = 11
    myrank = 1, recv_buffer[5] = 12
    myrank = 1, recv_buffer[6] = 20
    myrank = 1, recv_buffer[7] = 21
    myrank = 1, recv_buffer[8] = 22
    myrank = 1, recv_buffer[9] = 30
    myrank = 1, recv_buffer[10] = 31
    myrank = 1, recv_buffer[11] = 32
    
    myrank = 2, recv_buffer[0] = 0
    myrank = 2, recv_buffer[1] = 1
    myrank = 2, recv_buffer[2] = 2
    myrank = 2, recv_buffer[3] = 10
    myrank = 2, recv_buffer[4] = 11
    myrank = 2, recv_buffer[5] = 12
    myrank = 2, recv_buffer[6] = 20
    myrank = 2, recv_buffer[7] = 21
    myrank = 2, recv_buffer[8] = 22
    myrank = 2, recv_buffer[9] = 30
    myrank = 2, recv_buffer[10] = 31
    myrank = 2, recv_buffer[11] = 32
    
    myrank = 3, recv_buffer[0] = 0
    myrank = 3, recv_buffer[1] = 1
    myrank = 3, recv_buffer[2] = 2
    myrank = 3, recv_buffer[3] = 10
    myrank = 3, recv_buffer[4] = 11
    myrank = 3, recv_buffer[5] = 12
    myrank = 3, recv_buffer[6] = 20
    myrank = 3, recv_buffer[7] = 21
    myrank = 3, recv_buffer[8] = 22
    myrank = 3, recv_buffer[9] = 30
    myrank = 3, recv_buffer[10] = 31
    myrank = 3, recv_buffer[11] = 32
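    
    Note that all four ranks print identical contents: that is the "all" in Allgather. Since the post only demonstrates the fixed-size MPI_Allgather, here is a minimal MPI_Allgatherv sketch (an illustration under the same setup, not from the original post) in which rank r contributes r + 1 integers:
    
    #include <stdio.h>
    #include <mpi.h>
    
    int main(int argc, char *argv[])
    {
        int myrank, nprocs;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
        MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    
        // Rank r contributes r + 1 integers.
        int sendcount = myrank + 1;
        int *send_buffer = new int[sendcount];
        for (int i = 0; i < sendcount; i++)
            send_buffer[i] = myrank * 10 + i;
    
        // Every rank must know how much each peer sends (recvcounts)
        // and where each contribution starts in recv_buffer (displs).
        int *recvcounts = new int[nprocs];
        int *displs = new int[nprocs];
        int total = 0;
        for (int r = 0; r < nprocs; r++) {
            recvcounts[r] = r + 1;
            displs[r] = total;
            total += recvcounts[r];
        }
        int *recv_buffer = new int[total];
    
        MPI_Allgatherv(send_buffer, sendcount, MPI_INT,
            recv_buffer, recvcounts, displs, MPI_INT, MPI_COMM_WORLD);
    
        for (int i = 0; i < total; i++)
            fprintf(stderr, "myrank = %d, recv_buffer[%d] = %d\n", myrank, i, recv_buffer[i]);
    
        delete[] recv_buffer;
        delete[] displs;
        delete[] recvcounts;
        delete[] send_buffer;
        MPI_Finalize();
        return 0;
    }
    
    Built and run the same way (with a hypothetical file name test4.cc), every rank ends up with 0, 10, 11, 20, 21, 22, 30, 31, 32, 33 when launched with 4 processes.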
    
  • mpi.cr: MPI bindings for the Crystal language
  • MPI.jl: an MPI wrapper for Julia
  • MPI for Python. Overview: welcome to MPI for Python. This package provides Python bindings for the Message Passing Interface (MPI) standard. It is implemented on top of the MPI-1/2/3 specifications and exposes an API based on the standard MPI-2 C++ bindings. Dependencies: ... or later, or 2.0 or...
