Determining the number of threads for element-wise array operations in Metal
In this example there are two large 1D arrays of size n. The arrays are added together element-wise to calculate a 1D results array using the Accelerate vDSP.add() function and a Metal GPU compute kernel adder().
// Size of each array
private let n = 5_000_000
// Create two random arrays of size n
private var array1 = (1...n).map{ _ in Float.random(in: 1...10) }
private var array2 = (1...n).map{ _ in Float.random(in: 1...10) }
// Add two arrays using Accelerate vDSP
addAccel(array1, array2)
// Add two arrays using Metal on the GPU
addMetal(array1, array2)
The Accelerate code is shown below:
import Accelerate
func addAccel(_ arr1: [Float], _ arr2: [Float]) {
    let tic = DispatchTime.now().uptimeNanoseconds

    // Add two arrays and store results
    let y = vDSP.add(arr1, arr2)

    // Print out elapsed time
    let toc = DispatchTime.now().uptimeNanoseconds
    let elapsed = Float(toc - tic) / 1_000_000_000
    print("\nAccelerate vDSP elapsed time is \(elapsed) s")

    // Print out some results
    for i in 0..<3 {
        let a1 = String(format: "%.4f", arr1[i])
        let a2 = String(format: "%.4f", arr2[i])
        let y = String(format: "%.4f", y[i])
        print("\(a1) + \(a2) = \(y)")
    }
}
The Metal code is shown below:
import MetalKit
private func setupMetal(arr1: [Float], arr2: [Float]) -> (MTLCommandBuffer?, MTLBuffer?) {
    // Get the Metal GPU device
    let device = MTLCreateSystemDefaultDevice()

    // Queue for sending commands to the GPU
    let commandQueue = device?.makeCommandQueue()

    // Get our Metal GPU function
    let gpuFunctionLibrary = device?.makeDefaultLibrary()
    let adderGpuFunction = gpuFunctionLibrary?.makeFunction(name: "adder")

    var adderComputePipelineState: MTLComputePipelineState!
    do {
        adderComputePipelineState = try device?.makeComputePipelineState(function: adderGpuFunction!)
    } catch {
        print(error)
    }

    // Create the buffers to be sent to the GPU from our arrays
    let count = arr1.count
    let arr1Buff = device?.makeBuffer(bytes: arr1,
                                      length: MemoryLayout<Float>.size * count,
                                      options: .storageModeShared)
    let arr2Buff = device?.makeBuffer(bytes: arr2,
                                      length: MemoryLayout<Float>.size * count,
                                      options: .storageModeShared)
    let resultBuff = device?.makeBuffer(length: MemoryLayout<Float>.size * count,
                                        options: .storageModeShared)

    // Create a buffer to be sent to the command queue
    let commandBuffer = commandQueue?.makeCommandBuffer()

    // Create an encoder to set values on the compute function
    let commandEncoder = commandBuffer?.makeComputeCommandEncoder()
    commandEncoder?.setComputePipelineState(adderComputePipelineState)

    // Set the parameters of our GPU function
    commandEncoder?.setBuffer(arr1Buff, offset: 0, index: 0)
    commandEncoder?.setBuffer(arr2Buff, offset: 0, index: 1)
    commandEncoder?.setBuffer(resultBuff, offset: 0, index: 2)

    // Figure out how many threads we need to use for our operation
    let threadsPerGrid = MTLSize(width: count, height: 1, depth: 1)
    let maxThreadsPerThreadgroup = adderComputePipelineState.maxTotalThreadsPerThreadgroup
    let threadsPerThreadgroup = MTLSize(width: maxThreadsPerThreadgroup, height: 1, depth: 1)
    commandEncoder?.dispatchThreads(threadsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)

    // Tell the encoder that it is done encoding. Now we can send this off to the GPU.
    commandEncoder?.endEncoding()

    return (commandBuffer, resultBuff)
}
func addMetal(_ arr1: [Float], _ arr2: [Float]) {
    let (commandBuffer, resultBuff) = setupMetal(arr1: arr1, arr2: arr2)

    let tic = DispatchTime.now().uptimeNanoseconds

    // Push this command to the command queue for processing
    commandBuffer?.commit()

    // Wait until the GPU function completes before working with any of the data
    commandBuffer?.waitUntilCompleted()

    // Get the pointer to the beginning of our data
    // (capacity is the number of Float instances, not a byte count)
    let count = arr1.count
    var resultBufferPointer = resultBuff?.contents().bindMemory(to: Float.self, capacity: count)

    // Print out elapsed time
    let toc = DispatchTime.now().uptimeNanoseconds
    let elapsed = Float(toc - tic) / 1_000_000_000
    print("\nMetal GPU elapsed time is \(elapsed) s")

    // Print out the results
    for i in 0..<3 {
        let a1 = String(format: "%.4f", arr1[i])
        let a2 = String(format: "%.4f", arr2[i])
        let y = String(format: "%.4f", Float(resultBufferPointer!.pointee))
        print("\(a1) + \(a2) = \(y)")
        resultBufferPointer = resultBufferPointer?.advanced(by: 1)
    }
}
#include <metal_stdlib>
using namespace metal;
kernel void adder(constant float *array1 [[ buffer(0) ]],
                  constant float *array2 [[ buffer(1) ]],
                  device float *result [[ buffer(2) ]],
                  uint index [[ thread_position_in_grid ]])
{
    result[index] = array1[index] + array2[index];
}
Results from running the above code on a 2019 MacBook Pro are given below. Specs for the laptop are 2.6 GHz 6-Core Intel Core i7, 32 GB 2667 MHz DDR4, Intel UHD Graphics 630 1536 MB, and AMD Radeon Pro 5500M.
Accelerate vDSP elapsed time is 0.004532601 s
7.8964 + 6.3815 = 14.2779
9.3661 + 8.9641 = 18.3301
4.5389 + 8.5737 = 13.1126
Metal GPU elapsed time is 0.012219718 s
7.8964 + 6.3815 = 14.2779
9.3661 + 8.9641 = 18.3301
4.5389 + 8.5737 = 13.1126
Based on the elapsed times, the Accelerate function is faster than the Metal compute function. I think this is because I did not properly define the threads. How do I determine the optimum number of threads per grid and threads per thread group for this example?
// Figure out how many threads we need to use for our operation
let threadsPerGrid = MTLSize(width: count, height: 1, depth: 1)
let maxThreadsPerThreadgroup = adderComputePipelineState.maxTotalThreadsPerThreadgroup
let threadsPerThreadgroup = MTLSize(width: maxThreadsPerThreadgroup, height: 1, depth: 1)
commandEncoder?.dispatchThreads(threadsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
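For reference, Apple's sample code for 1-D compute dispatches sizes the grid much like the snippet above. A minimal sketch, assuming the adderComputePipelineState, count, and commandEncoder from the code earlier, differs only in clamping the threadgroup width for arrays smaller than the pipeline's maximum:

// A sketch of 1-D thread sizing, assuming adderComputePipelineState,
// count, and commandEncoder from the code above. dispatchThreads lets
// Metal handle a final partial (non-uniform) threadgroup, so the grid
// is exactly `count` threads wide.
let threadsPerGrid = MTLSize(width: count, height: 1, depth: 1)

// Fill each threadgroup up to the pipeline's maximum, clamped to the
// element count for very small arrays.
let groupWidth = min(adderComputePipelineState.maxTotalThreadsPerThreadgroup, count)
let threadsPerThreadgroup = MTLSize(width: groupWidth, height: 1, depth: 1)

commandEncoder?.dispatchThreads(threadsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)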
Comments (1)
For Metal, you are measuring the time of both the computation and the data transfer from the GPU to the CPU, and you are also creating the array on the CPU. You should use addCompletedHandler to get the GPU computation time.
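A minimal sketch of that suggestion, assuming the commandBuffer returned by setupMetal above: register a completion handler before committing, and read the command buffer's gpuStartTime/gpuEndTime properties (available on recent macOS and iOS versions), which bracket only the GPU execution:

// Register the handler before commit(); it runs once the GPU has
// finished executing this command buffer.
commandBuffer?.addCompletedHandler { buffer in
    // gpuStartTime and gpuEndTime are reported in seconds.
    let gpuElapsed = buffer.gpuEndTime - buffer.gpuStartTime
    print("Metal GPU compute time is \(gpuElapsed) s")
}
commandBuffer?.commit()
commandBuffer?.waitUntilCompleted()

Measured this way, the elapsed time excludes the CPU-side buffer setup and the result readback, which makes it a fairer comparison against the vDSP measurement.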