It is really easy to create unit tests in general, but it will take some practice to master this new concept. If functions have parameters and return values, then things are straightforward, like in the first example. Here are a few more examples.

app/dummy.h

#ifndef __DUMMY_H__
#define __DUMMY_H__

/* NOTE(review): identifiers beginning with a double underscore are reserved
 * for the implementation (C11 7.1.3); consider renaming the guard to DUMMY_H_. */

#include <stdint.h>  /* uint32_t — makes the header self-contained instead of
                        relying on every .c file including <stdint.h> first */

uint32_t sum( uint32_t a, uint32_t b );  /* returns a + b */
uint32_t sub( uint32_t a, uint32_t b );  /* returns a - b (wraps if b > a) */
uint32_t mul( uint32_t a, uint32_t b );  /* returns a * b */

#endif /* __DUMMY_H__ */

app/dummy.c

#include <stdint.h>
#include "dummy.h"

/* Add two unsigned 32-bit integers and return the result
 * (modular arithmetic: wraps on overflow, as unsigned always does). */
uint32_t sum( uint32_t a, uint32_t b )
{
    uint32_t result = a + b;
    return result;
}

/* Subtract b from a as unsigned 32-bit values.
 * If b > a the result wraps modulo 2^32 (well-defined for unsigned types). */
uint32_t sub( uint32_t a, uint32_t b )
{
    uint32_t result = a - b;
    return result;
}

/* Multiply two unsigned 32-bit integers (product wraps modulo 2^32). */
uint32_t mul( uint32_t a, uint32_t b )
{
    uint32_t result = a * b;
    return result;
}

You can add as many test cases as you need for a single function; my recommendation is to follow a naming convention like:

void test__name_of_function_to_test__name_of_test( void )

test/test_dummy.c

#include "unity.h"
#include "dummy.h"

/* Unity fixture: runs before every test case; nothing to prepare here. */
void setUp(void)
{
}

/* Unity fixture: runs after every test case; nothing to clean up here. */
void tearDown(void)
{
}

/*test case 1: for function sum*/
/* test case 1: sum() adds two small values correctly */
void test__sum__2_plus_3_equal_5(void)
{
    uint32_t result = sum( 2, 3 );
    TEST_ASSERT_EQUAL_MESSAGE( 5, result, "2 + 3 = 5" );
}

/*test case 2: for function sum*/
/* test case 2: sum() adds two larger values correctly */
void test__sum__100_plus_50_equal_150(void)
{
    uint32_t result = sum( 100, 50 );
    TEST_ASSERT_EQUAL_MESSAGE( 150, result, "100 + 50 = 150" );
}

/*test case 1: for function sub*/
/* test case 1: sub() subtracts a smaller value from a larger one */
void test__sub__100_minus_50_equal_50(void)
{
    uint32_t result = sub( 100, 50 );
    TEST_ASSERT_EQUAL_MESSAGE( 50, result, "100 - 50 = 50" );
}

/*test case 2: for function sub*/
/*test case 2: for function sub*/
/* NOTE(review): the test name says "equal_minus_1" but 3 - 2 is 1 and the
 * assertion correctly expects 1 — the function name is a misnomer and should
 * read test__sub__3_minus_2_equal_1. */
void test__sub__3_minus_2_equal_minus_1(void)
{
    uint32_t res = sub( 3, 2 );
    TEST_ASSERT_EQUAL_MESSAGE( 1, res, "3 - 2 = 1" );
}

/*test case 1: for function mul*/
/* test case 1: mul() multiplies two small values correctly */
void test__mul__2_by_3_equal_6(void)
{
    uint32_t result = mul( 2, 3 );
    TEST_ASSERT_EQUAL_MESSAGE( 6, result, "2 * 3 = 6" );
}

/*test case 2: for function mul*/
/* test case 2: mul() multiplies two larger values correctly */
void test__mul__100_by_50_equal_5000(void)
{
    uint32_t result = mul( 100, 50 );
    TEST_ASSERT_EQUAL_MESSAGE( 5000, result, "100 * 50 = 5000" );
}

test output

$ ceedling test:all


Test 'test_dummy.c'
-------------------
Generating runner for test_dummy.c...
Compiling test_dummy_runner.c...
Compiling test_dummy.c...
Compiling unity.c...
Compiling dummy.c...
Compiling CException.c...
Compiling cmock.c...
Linking test_dummy.exe...
Running test_dummy.exe...
test_dummy.c:12:test__sum__2_plus_3_equal_5:PASS
test_dummy.c:18:test__sum__100_plus_50_equal_150:PASS
test_dummy.c:24:test__sub__100_minus_50_equal_50:PASS
test_dummy.c:30:test__sub__3_minus_2_equal_minus_1:PASS
test_dummy.c:36:test__mul__2_by_3_equal_6:PASS
test_dummy.c:42:test__mul__100_by_50_equal_5000:PASS

-----------------------
6 Tests 0 Failures 0 Ignored
OK

The more tests you add, the messier the output will get. You certainly need the result information, but you will only care about the test cases that fail (because those are the ones you need to fix). Ceedling has an option to simplify the output information by adding what are called plugins; in project.yml add the lines:

:project:
  :build_root: Build/ceedling/  # Directory where ceedling will place its output
  :release_build: TRUE

:paths:
  :test:
    - test/**     # directory where the unit testing are
  :source:
    - app/**      # directory where the functions to test are

# Plugins extend ceedling; stdout_pretty_tests_report condenses the
# test output into the summary format shown below.
:plugins: 
  :load_paths:
    - "#{Ceedling.load_path}"   # built-in plugin search path
  :enabled:
    - stdout_pretty_tests_report

Same test output but pretty and simplified

$ ceedling test:all


Test 'test_dummy.c'
-------------------
Generating runner for test_dummy.c...
Compiling test_dummy_runner.c...
Compiling test_dummy.c...
Compiling unity.c...
Compiling dummy.c...
Compiling CException.c...
Compiling cmock.c...
Linking test_dummy.exe...
Running test_dummy.exe...

--------------------
OVERALL TEST SUMMARY
--------------------
TESTED:  6
PASSED:  6
FAILED:  0
IGNORED: 0

Force an error:

/* Deliberately failing test case: sum(2, 3) returns 5, but we assert 6 on
 * purpose, to demonstrate Ceedling's failure report output shown below. */
void test__sum__error_result(void)
{
    uint32_t res = sum( 2, 3 );
    TEST_ASSERT_EQUAL_MESSAGE( 6, res, "2 + 3 = 6" );
}

Test output indicating the number of tests passed, the number of tests failed, and details about the failed test case:

$ ceedling test:all


Test 'test_dummy.c'
-------------------
Generating runner for test_dummy.c...
Compiling test_dummy_runner.c...
Compiling test_dummy.c...
Linking test_dummy.exe...
Running test_dummy.exe...

-------------------
FAILED TEST SUMMARY
-------------------
[test_dummy.c]
  Test: test__sum__error_result
  At line (57): "Expected 6 Was 5. 2 + 3 = 6"

--------------------
OVERALL TEST SUMMARY
--------------------
TESTED:  7
PASSED:  6
FAILED:  1
IGNORED: 0

---------------------
BUILD FAILURE SUMMARY
---------------------
Unit test failures.

Testing inline functions

Sometimes you are going to have inline functions, and as per MISRA and other guidelines you will need to declare them as static. Ceedling has no "problemo" testing this kind of function in the usual way.

#ifndef __DUMMY_H__
#define __DUMMY_H__

#include <stdint.h>  /* uint32_t — header is self-contained */

uint32_t sum( uint32_t a, uint32_t b );
uint32_t sub( uint32_t a, uint32_t b );
uint32_t mul( uint32_t a, uint32_t b );

/* Return a + 1. static inline so each translation unit that includes this
 * header gets its own copy without a linker conflict. */
static inline uint32_t increment( uint32_t a )
{
    return a + 1;
}

#endif /* __DUMMY_H__ */

As you can see, the test is pretty much the same as with regular functions

/* increment() bumps its argument by exactly one */
void test__increment__one_integer(void)
{
    uint32_t result = increment( 2 );
    TEST_ASSERT_EQUAL_MESSAGE( 3, result, "2++ = 3" );
}

There is only one thing you need to keep in mind: since inline functions are most of the time declared in header files, you need to run the tests from scratch (a clean compilation), and to achieve this you need to type ceedling clobber first (we didn't remove the error case):

$ ceedling clobber
$ ceedling test:all


Test 'test_dummy.c'
-------------------
Generating runner for test_dummy.c...
Compiling test_dummy_runner.c...
Compiling test_dummy.c...
Linking test_dummy.exe...
Running test_dummy.exe...

-------------------
FAILED TEST SUMMARY
-------------------
[test_dummy.c]
  Test: test__sum__error_result
  At line (57): "Expected 6 Was 5. 2 + 3 = 6"

--------------------
OVERALL TEST SUMMARY
--------------------
TESTED:  8
PASSED:  7
FAILED:  1
IGNORED: 0

---------------------
BUILD FAILURE SUMMARY
---------------------
Unit test failures.