Approximating π with the Leibniz method: provide four implementations (serial, OpenMP, MPI, and hybrid OpenMP+MPI) in a single program, measure the runtime of each, and compute the speedups.
The Leibniz formula can be used to approximate the value of π. The formula is:
π = 4 * (1 - 1/3 + 1/5 - 1/7 + ... )
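Equivalently, in summation form, together with the standard alternating-series error bound (which tells you how many terms are needed for a target accuracy):

```latex
\frac{\pi}{4} = \sum_{i=0}^{\infty} \frac{(-1)^{i}}{2i+1},
\qquad
\left|\, \pi - 4\sum_{i=0}^{N-1} \frac{(-1)^{i}}{2i+1} \,\right| \le \frac{4}{2N+1}
```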
Below is a code example covering four implementations: **serial, OpenMP parallel, MPI distributed, and hybrid OpenMP+MPI**. Timing is built in, so the speedup of each variant can be derived from the measured runtimes.
---
### Complete Code
The complete program below contains all four implementations together with the timing and performance comparison.
```c
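/*
 * Build/run sketch (assumes a GCC-backed MPI compiler wrapper such as mpicc
 * from Open MPI or MPICH; the file name pi_leibniz.c is only illustrative):
 *   mpicc -O2 -fopenmp pi_leibniz.c -o pi_leibniz
 *   mpirun -np 4 ./pi_leibniz 1000000000 8
 */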
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <mpi.h>
double serial_pi(long long terms);
double openmp_pi(long long terms, int threads);
double mpi_pi(long long terms, int rank, int size);
double hybrid_mpi_openmp_pi(long long terms, int rank, int size);
void print_performance(double time_serial, double time_openmp,
                       double time_mpi, double time_hybrid) {
    printf("\nPerformance Analysis:\n");
    printf("Serial Time : %.6f s\n", time_serial);
    printf("OpenMP Time : %.6f s | Speedup: %.2f x\n", time_openmp, time_serial / time_openmp);
    printf("MPI Time    : %.6f s | Speedup: %.2f x\n", time_mpi, time_serial / time_mpi);
    printf("Hybrid Time : %.6f s | Speedup: %.2f x\n", time_hybrid, time_serial / time_hybrid);
}
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (argc != 3) {
        if (rank == 0)
            fprintf(stderr, "Usage: %s [terms] [threads]\n", argv[0]);
        MPI_Finalize();
        return EXIT_FAILURE;
    }
    long long terms = atoll(argv[1]);   // number of series terms
    int threads = atoi(argv[2]);        // OpenMP threads per process

    double pi_serial = 0.0, pi_openmp = 0.0;
    double time_serial = 0.0, time_openmp = 0.0;

    // 1. Serial and 2. OpenMP versions run on the master process only.
    //    MPI_Wtime() measures wall-clock time; clock() would sum CPU time
    //    across all threads and hide the OpenMP speedup.
    if (rank == 0) {
        double t0 = MPI_Wtime();
        pi_serial = serial_pi(terms);
        time_serial = MPI_Wtime() - t0;

        t0 = MPI_Wtime();
        pi_openmp = openmp_pi(terms, threads);
        time_openmp = MPI_Wtime() - t0;
    }

    // 3. Pure MPI: every rank computes a partial sum, and the results are
    //    reduced onto rank 0.
    MPI_Barrier(MPI_COMM_WORLD);
    double t0 = MPI_Wtime();
    double local_mpi = mpi_pi(terms, rank, size);
    double pi_mpi = 0.0;
    MPI_Reduce(&local_mpi, &pi_mpi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    double time_mpi = MPI_Wtime() - t0;

    // 4. Hybrid MPI+OpenMP: same block decomposition, but each rank
    //    parallelizes its local loop with OpenMP threads.
    omp_set_num_threads(threads);
    MPI_Barrier(MPI_COMM_WORLD);
    t0 = MPI_Wtime();
    double local_hybrid = hybrid_mpi_openmp_pi(terms, rank, size);
    double pi_hybrid = 0.0;
    MPI_Reduce(&local_hybrid, &pi_hybrid, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    double time_hybrid = MPI_Wtime() - t0;

    // The master process prints the results and the speedup analysis.
    if (rank == 0) {
        printf("PI Value Comparison:\n");
        printf("Serial PI : %.15f\n", pi_serial);
        printf("OpenMP PI : %.15f\n", pi_openmp);
        printf("MPI PI    : %.15f\n", pi_mpi);
        printf("Hybrid PI : %.15f\n", pi_hybrid);
        print_performance(time_serial, time_openmp, time_mpi, time_hybrid);
    }

    MPI_Finalize();
    return EXIT_SUCCESS;
}
// === Implementations ====================================================
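// Serial baseline: a single thread sums the Leibniz series term by term.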
double serial_pi(long long terms) {
    double sum = 0.0;
    for (long long i = 0; i < terms; ++i) {
        double sign = (i % 2 == 0) ? 1.0 : -1.0;
        sum += sign / (2 * i + 1);
    }
    return 4 * sum;
}
// OpenMP: split the loop across threads; the reduction clause gives each
// thread a private copy of sum and combines them when the loop finishes.
double openmp_pi(long long terms, int threads) {
    omp_set_num_threads(threads);
    double sum = 0.0;
    #pragma omp parallel for reduction(+ : sum)
    for (long long i = 0; i < terms; ++i) {
        double sign = (i % 2 == 0) ? 1.0 : -1.0;
        sum += sign / (2 * i + 1);
    }
    return 4 * sum;
}
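// Pure MPI: each rank sums a contiguous block of terms; main() combines the
// partial results on rank 0 with MPI_Reduce.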
double mpi_pi(long long terms, int rank, int size) {
    double partial_sum = 0.0;
    long long step = terms / size;
    long long start = rank * step;
    long long end = (rank == size - 1) ? terms : start + step;
    for (long long i = start; i < end; ++i) {
        double sign = (i % 2 == 0) ? 1.0 : -1.0;
        partial_sum += sign / (2 * i + 1);
    }
    return 4 * partial_sum;
}
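// Hybrid MPI+OpenMP: same block decomposition as mpi_pi, with the local loop
// additionally parallelized across OpenMP threads.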
double hybrid_mpi_openmp_pi(long long terms, int rank, int size) {
    long long step = terms / size;
    long long start = rank * step;
    long long end = (rank == size - 1) ? terms : start + step;
    double partial_sum = 0.0;
    #pragma omp parallel for reduction(+ : partial_sum)
    for (long long i = start; i < end; ++i) {
        double sign = (i % 2 == 0) ? 1.0 : -1.0;
        partial_sum += sign / (2 * i + 1);
    }
    return 4 * partial_sum;
}
```
---
### Key Points
1. **Serial implementation (`serial_pi`)**
   A single thread accumulates the terms of the series one after another.
2. **OpenMP parallelization (`openmp_pi`)**
   `#pragma omp parallel for` with a `reduction(+ : sum)` clause spreads the loop iterations across the CPU cores.
3. **MPI distributed computation (`mpi_pi`)**
   The total number of terms is split evenly among the processes; each rank computes a local partial sum, and the master process aggregates them with `MPI_Reduce` (the exact index ranges are written out after this list).
4. **Hybrid OpenMP+MPI (`hybrid_mpi_openmp_pi`)**
   The work is first divided among the MPI ranks, and each rank then splits its local range further across OpenMP threads.
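As referenced in item 3, the MPI and hybrid versions share the same block decomposition; rank r handles the index range below (a restatement of the `start`/`end` computation in the code):

```latex
\text{start}_r = r \left\lfloor \frac{\text{terms}}{\text{size}} \right\rfloor,
\qquad
\text{end}_r =
\begin{cases}
\text{terms} & \text{if } r = \text{size}-1,\\
\text{start}_r + \left\lfloor \frac{\text{terms}}{\text{size}} \right\rfloor & \text{otherwise,}
\end{cases}
```

so the last rank absorbs any remainder when `terms` is not divisible by `size`.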
---
### Sample Output
Assuming `terms=1e9` and `threads=8` (the timings below are purely illustrative):
```
PI Value Comparison:
Serial PI : 3.141592653589793
OpenMP PI : 3.141592653589793
MPI PI    : 3.141592653589793
Hybrid PI : 3.141592653589793

Performance Analysis:
Serial Time : 3.200000 s
OpenMP Time : 0.400000 s | Speedup: 8.00 x
MPI Time    : 0.300000 s | Speedup: 10.67 x
Hybrid Time : 0.200000 s | Speedup: 16.00 x
```
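The speedup column is simply the serial wall time divided by each parallel wall time; checking the illustrative numbers above:

```latex
S_{\text{OpenMP}} = \frac{3.2}{0.4} = 8.00, \qquad
S_{\text{MPI}} = \frac{3.2}{0.3} \approx 10.67, \qquad
S_{\text{Hybrid}} = \frac{3.2}{0.2} = 16.00
```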
---