Add CUDA to ROS Package
I would like to use CUDA within a ROS package. Does anyone have a simple example for me?

I tried to build a static library containing the CUDA function and link that library into my package, but I always get a linking error: undefined reference cuda...

When I build an executable instead of the library, it works.

Please help!
c++ cuda static-libraries linker-errors ros
asked Sep 9 '14 at 15:07
user2333894
  • 1

    That means you need to link to the CUDA library. If you post the specific steps you took and the compiler/linker output messages, people will be able to help you better.
    – thang
    Sep 9 '14 at 15:11
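
To illustrate that comment (a minimal sketch only; the target and library names below are placeholders, not taken from the question): with the classic FindCUDA CMake module, the CUDA runtime libraries have to end up on the final link line of whatever target consumes the static library, for example:

find_package(CUDA REQUIRED)                 # classic FindCUDA module, sets the CUDA_* variables

include_directories(${CUDA_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS})

# my_node / my_cuda_lib are illustrative placeholders.
# If the static library was built outside of CUDA_ADD_LIBRARY, the executable
# that consumes it still needs the CUDA runtime linked in explicitly:
add_executable(my_node src/main.cpp)
target_link_libraries(my_node
  ${catkin_LIBRARIES}
  my_cuda_lib            # the prebuilt static library containing the .cu code
  ${CUDA_LIBRARIES}      # libcudart and friends reported by FindCUDA
)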
1 Answer
I found a solution myself:



CMakeLists.txt:



cmake_minimum_required(VERSION 2.8.3)
PROJECT (beginner_tutorials)
FIND_PACKAGE(CUDA REQUIRED)

find_package(catkin REQUIRED COMPONENTS
  roscpp
  rospy
  std_msgs
)

SET(CUDA_NVCC_FLAGS "-arch=sm_13" CACHE STRING "nvcc flags" FORCE)
SET(CUDA_VERBOSE_BUILD ON CACHE BOOL "nvcc verbose" FORCE)
SET(LIB_TYPE STATIC)
CUDA_ADD_LIBRARY(TestLib ${LIB_TYPE} src/helloWorld.cu)

catkin_package(
)
include_directories(
  ${catkin_INCLUDE_DIRS}
)

ADD_EXECUTABLE(beginner_tutorials_node src/main.cpp)
ADD_DEPENDENCIES(beginner_tutorials_node TestLib)
TARGET_LINK_LIBRARIES(beginner_tutorials_node
  ${catkin_LIBRARIES}
  ${PCL_LIBRARIES}
  TestLib
)
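
Aside (not part of the original answer): the FindCUDA module and CUDA_ADD_LIBRARY used above are deprecated in newer CMake, which treats CUDA as a first-class language. A rough, untested sketch of the target-based equivalent, assuming CMake >= 3.18 and reusing the names from this answer:

cmake_minimum_required(VERSION 3.18)
project(beginner_tutorials LANGUAGES CXX CUDA)   # CUDA as a first-class language

find_package(catkin REQUIRED COMPONENTS roscpp rospy std_msgs)
catkin_package()
include_directories(${catkin_INCLUDE_DIRS})

add_library(TestLib STATIC src/helloWorld.cu)    # nvcc is invoked automatically for .cu files
set_target_properties(TestLib PROPERTIES CUDA_ARCHITECTURES "50")   # "50" is a placeholder; pick your GPU's architecture

add_executable(beginner_tutorials_node src/main.cpp)
target_link_libraries(beginner_tutorials_node ${catkin_LIBRARIES} TestLib)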


main.cpp:



int testmain();   // implemented in helloWorld.cu

int main()
{
    testmain();
    return 0;
}



helloWorld.cu:



#include <stdio.h>
#include <stdlib.h>

#include <cuda.h>
#include <cuda_runtime.h>

const int N = 7;
const int blocksize = 7;

// Adds the per-thread offset in b to the character in a ("Hello " becomes "World!")
__global__ void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int testmain()
{
    char a[N] = "Hello ";
    int b[N] = { 15, 10, 6, 0, -11, 1, 0 };

    char *ad;
    int *bd;
    const int csize = N * sizeof(char);
    const int isize = N * sizeof(int);

    printf("%s", a);

    cudaMalloc( (void**)&ad, csize );
    cudaMalloc( (void**)&bd, isize );
    cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hello<<<dimGrid, dimBlock>>>(ad, bd);
    cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
    cudaFree( ad );
    cudaFree( bd );   // free both device buffers

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
answered Sep 9 '14 at 15:27
user2333894
  • For anyone else stumbling across this: it works just as well with a shared library, simply replace
    – Germanunkol
    Sep 5 '18 at 7:28
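
Presumably the replacement the comment refers to is the library type set in the CMakeLists.txt above, i.e. something along the lines of (a guess, not confirmed by the comment):

SET(LIB_TYPE SHARED)                                     # instead of STATIC
CUDA_ADD_LIBRARY(TestLib ${LIB_TYPE} src/helloWorld.cu)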